repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
muhd7rosli/quickstart
payment-cdi-event/src/main/java/org/jboss/as/quickstarts/payment/handler/IDebitEventObserver.java
1166
/* * JBoss, Home of Professional Open Source * Copyright 2013, Red Hat, Inc. and/or its affiliates, and individual * contributors by the @authors tag. See the copyright.txt in the * distribution for a full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.as.quickstarts.payment.handler; /** * * @author Elvadas-Nono * */ import javax.enterprise.event.Observes; import org.jboss.as.quickstarts.payment.events.PaymentEvent; import org.jboss.as.quickstarts.payment.qualifiers.Debit; public interface IDebitEventObserver { public void onDebitPaymentEvent(@Observes @Debit PaymentEvent event); }
apache-2.0
jdahlstrom/vaadin.react
uitest/src/main/java/com/vaadin/tests/components/datefield/TestDatefieldYear.java
904
package com.vaadin.tests.components.datefield; import java.util.Date; import java.util.Locale; import com.vaadin.tests.components.TestBase; import com.vaadin.ui.DateField; public class TestDatefieldYear extends TestBase { @Override protected String getDescription() { return "A popup with resolution year or month should update the textfield when browsing. The value displayed in the textfield should always be the same as the popup shows."; } @Override protected Integer getTicketNumber() { return 2813; } @Override protected void setup() { @SuppressWarnings("deprecation") DateField df = new DateField("Year", new Date(2009 - 1900, 4 - 1, 1)); df.setLocale(new Locale("en", "US")); df.setResolution(DateField.RESOLUTION_YEAR); df.setResolution(DateField.RESOLUTION_MONTH); addComponent(df); } }
apache-2.0
tillrohrmann/flink
flink-examples/flink-examples-table/src/test/java/org/apache/flink/table/examples/scala/basics/GettingStartedExampleITCase.java
1521
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.examples.scala.basics; import org.apache.flink.table.examples.utils.ExampleOutputTestBase; import org.junit.Test; import static org.hamcrest.CoreMatchers.containsString; import static org.junit.Assert.assertThat; /** Test for Scala {@link GettingStartedExample}. */ public class GettingStartedExampleITCase extends ExampleOutputTestBase { @Test public void testExample() { GettingStartedExample.main(new String[0]); final String consoleOutput = getOutputString(); assertThat( consoleOutput, containsString("| 6 | 1979 |")); assertThat(consoleOutput, containsString("SUCCESS!")); } }
apache-2.0
idea4bsd/idea4bsd
platform/platform-api/src/com/intellij/openapi/diagnostic/ErrorReportSubmitter.java
3746
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.diagnostic; import com.intellij.openapi.extensions.PluginAware; import com.intellij.openapi.extensions.PluginDescriptor; import com.intellij.util.Consumer; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.awt.*; /** * This class should be extended by plugin vendor and provided by means of {@link com.intellij.ExtensionPoints#ERROR_HANDLER} if * reporting errors that happened in plugin code to vendor is desirable. */ public abstract class ErrorReportSubmitter implements PluginAware { private PluginDescriptor myPlugin; /** * Called by the framework. Allows to identify the plugin that provided this extension. */ @Override public void setPluginDescriptor(PluginDescriptor plugin) { myPlugin = plugin; } /** * @return plugin that provided this particular extension */ public PluginDescriptor getPluginDescriptor() { return myPlugin; } /** * @return an action text to be used in Error Reporter user interface, e.g. "Report to JetBrains". */ @NotNull public abstract String getReportActionText(); /** * This method is called whenever an exception in a plugin code had happened and a user decided to report a problem to the plugin vendor. * * @param events a non-empty sequence of error descriptors. * @param additionalInfo additional information provided by a user. 
* @param parentComponent UI component to use as a parent in any UI activity from a submitter. * @param consumer a callback to be called after sending is finished (or failed). * @return {@code true} if reporting was started, {@code false} if a report can't be sent at the moment. */ @SuppressWarnings("deprecation") public boolean submit(@NotNull IdeaLoggingEvent[] events, @Nullable String additionalInfo, @NotNull Component parentComponent, @NotNull Consumer<SubmittedReportInfo> consumer) { return trySubmitAsync(events, additionalInfo, parentComponent, consumer); } /** @deprecated implement {@link #submit(IdeaLoggingEvent[], String, Component, Consumer)} (to be removed in IDEA 16) */ @SuppressWarnings({"deprecation", "unused"}) public boolean trySubmitAsync(IdeaLoggingEvent[] events, String info, Component parent, Consumer<SubmittedReportInfo> consumer) { submitAsync(events, info, parent, consumer); return true; } /** @deprecated implement {@link #submit(IdeaLoggingEvent[], String, Component, Consumer)} (to be removed in IDEA 16) */ @SuppressWarnings({"deprecation", "unused"}) public void submitAsync(IdeaLoggingEvent[] events, String info, Component parent, Consumer<SubmittedReportInfo> consumer) { consumer.consume(submit(events, parent)); } /** @deprecated implement {@link #submit(IdeaLoggingEvent[], String, Component, Consumer)} (to be removed in IDEA 16) */ @SuppressWarnings({"deprecation", "unused"}) public SubmittedReportInfo submit(IdeaLoggingEvent[] events, Component parent) { throw new UnsupportedOperationException("Deprecated API called"); } }
apache-2.0
sarwarbhuiyan/elasticsearch
plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceTwoNodesMock.java
3842
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.cloud.azure; import com.microsoft.windowsazure.management.compute.models.*; import org.elasticsearch.cloud.azure.management.AzureComputeServiceAbstractMock; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import java.net.InetAddress; /** * Mock Azure API with two started nodes */ public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstractMock { public static class TestPlugin extends Plugin { @Override public String name() { return "mock-compute-service"; } @Override public String description() { return "plugs in a mock compute service for testing"; } public void onModule(AzureModule azureModule) { azureModule.computeServiceImpl = AzureComputeServiceTwoNodesMock.class; } } NetworkService networkService; @Inject protected AzureComputeServiceTwoNodesMock(Settings settings, NetworkService networkService) { super(settings); this.networkService = networkService; } @Override public HostedServiceGetDetailedResponse getServiceDetails() { HostedServiceGetDetailedResponse 
response = new HostedServiceGetDetailedResponse(); HostedServiceGetDetailedResponse.Deployment deployment = new HostedServiceGetDetailedResponse.Deployment(); // Fake the deployment deployment.setName("dummy"); deployment.setDeploymentSlot(DeploymentSlot.Production); deployment.setStatus(DeploymentStatus.Running); // Fake a first instance RoleInstance instance1 = new RoleInstance(); instance1.setInstanceName("dummy1"); // Fake the private IP instance1.setIPAddress(InetAddress.getLoopbackAddress()); // Fake the public IP InstanceEndpoint endpoint1 = new InstanceEndpoint(); endpoint1.setName("elasticsearch"); endpoint1.setVirtualIPAddress(InetAddress.getLoopbackAddress()); endpoint1.setPort(9400); instance1.setInstanceEndpoints(CollectionUtils.newArrayList(endpoint1)); // Fake a first instance RoleInstance instance2 = new RoleInstance(); instance2.setInstanceName("dummy1"); // Fake the private IP instance2.setIPAddress(InetAddress.getLoopbackAddress()); // Fake the public IP InstanceEndpoint endpoint2 = new InstanceEndpoint(); endpoint2.setName("elasticsearch"); endpoint2.setVirtualIPAddress(InetAddress.getLoopbackAddress()); endpoint2.setPort(9401); instance2.setInstanceEndpoints(CollectionUtils.newArrayList(endpoint2)); deployment.setRoleInstances(CollectionUtils.newArrayList(instance1, instance2)); response.setDeployments(CollectionUtils.newArrayList(deployment)); return response; } }
apache-2.0
Buble1981/MyDroolsFork
drools-decisiontables/src/test/java/org/drools/decisiontable/parser/xls/ExcelParserTest.java
2519
/* * Copyright 2005 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.decisiontable.parser.xls; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import java.util.List; import java.util.Map; import org.apache.poi.ss.usermodel.Cell; import org.apache.poi.ss.usermodel.Sheet; import org.apache.poi.ss.usermodel.Workbook; import org.apache.poi.ss.util.CellRangeAddress; import org.apache.poi.xssf.usermodel.XSSFWorkbook; import org.drools.template.parser.DataListener; import org.junit.Test; /** * * Some unit tests for the corners of ExcelParser that are not explicitly * covered by integration tests. */ public class ExcelParserTest { private static final String LAST_CELL_VALUE = "last"; private static final String FIRST_CELL_CONTENT = "first"; /** * This should test to see if a cell is in a certain range or not. * If it is in a merged range, then it should return the top left cell. 
* @throws Exception */ @Test public void testCellMerge() throws Exception { ExcelParser parser = new ExcelParser((Map<String, List<DataListener>>) null); CellRangeAddress[] ranges = new CellRangeAddress[1]; Workbook workbook = new XSSFWorkbook(); Sheet sheet = workbook.createSheet(); Cell cell = sheet.createRow(2).createCell(2); ranges[0] = new CellRangeAddress(2, 7, 2, 5); cell.setCellValue(FIRST_CELL_CONTENT); cell = sheet.createRow(7).createCell(5); cell.setCellValue(LAST_CELL_VALUE); cell = sheet.createRow(1).createCell(1); assertNull(parser.getRangeIfMerged(cell, ranges)); cell = sheet.getRow(2).createCell(5); cell.setCellValue("wrong"); CellRangeAddress rangeIfMerged = parser.getRangeIfMerged(cell, ranges); assertEquals(FIRST_CELL_CONTENT, sheet.getRow(rangeIfMerged.getFirstRow()).getCell(rangeIfMerged.getFirstColumn()).getStringCellValue()); } }
apache-2.0
siosio/intellij-community
java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/addSingleStaticImport/InaccessibleSuper_after.java
125
import foo.*; import static foo.Bar.foo; class Class2 { public static void main(String[] args) { f<caret>oo(); } }
apache-2.0
rocketballs/netty
testsuite/src/test/java/io/netty/testsuite/transport/socket/WriteBeforeRegisteredTest.java
1766
/* * Copyright 2013 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package io.netty.testsuite.transport.socket; import io.netty.bootstrap.Bootstrap; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.socket.SocketChannel; import org.junit.Test; public class WriteBeforeRegisteredTest extends AbstractClientSocketTest { @Test(timeout = 30000) public void testWriteBeforeConnect() throws Throwable { run(); } public void testWriteBeforeConnect(Bootstrap cb) throws Throwable { TestHandler h = new TestHandler(); SocketChannel ch = null; try { ch = (SocketChannel) cb.handler(h).connect().channel(); ch.writeAndFlush(Unpooled.wrappedBuffer(new byte[] { 1 })); } finally { if (ch != null) { ch.close(); } } } private static class TestHandler extends ChannelInboundHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { cause.printStackTrace(); } } }
apache-2.0
haikuowuya/android_system_code
src/android/provider/SyncConstValue.java
2571
/* * Copyright (C) 2006 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.provider; /** * Columns for tables that are synced to a server. * @deprecated Do not use. * @hide */ public interface SyncConstValue { /** * The account that was used to sync the entry to the device. * <P>Type: TEXT</P> */ public static final String _SYNC_ACCOUNT = "_sync_account"; /** * The type of the account that was used to sync the entry to the device. * <P>Type: TEXT</P> */ public static final String _SYNC_ACCOUNT_TYPE = "_sync_account_type"; /** * The unique ID for a row assigned by the sync source. NULL if the row has never been synced. * <P>Type: TEXT</P> */ public static final String _SYNC_ID = "_sync_id"; /** * The last time, from the sync source's point of view, that this row has been synchronized. * <P>Type: INTEGER (long)</P> */ public static final String _SYNC_TIME = "_sync_time"; /** * The version of the row, as assigned by the server. * <P>Type: TEXT</P> */ public static final String _SYNC_VERSION = "_sync_version"; /** * Used in temporary provider while syncing, always NULL for rows in persistent providers. * <P>Type: INTEGER (long)</P> */ public static final String _SYNC_LOCAL_ID = "_sync_local_id"; /** * Used only in persistent providers, and only during merging. * <P>Type: INTEGER (long)</P> */ public static final String _SYNC_MARK = "_sync_mark"; /** * Used to indicate that local, unsynced, changes are present. 
* <P>Type: INTEGER (long)</P> */ public static final String _SYNC_DIRTY = "_sync_dirty"; /** * Used to indicate that this account is not synced */ public static final String NON_SYNCABLE_ACCOUNT = "non_syncable"; /** * Used to indicate that this account is not synced */ public static final String NON_SYNCABLE_ACCOUNT_TYPE = "android.local"; }
apache-2.0
astubbs/wicket.get-portals2
wicket/src/test/java/org/apache/wicket/markup/parser/Page_3.java
1226
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.wicket.markup.parser; import org.apache.wicket.markup.html.WebPage; import org.apache.wicket.markup.html.basic.Label; /** * Mock page for testing. * * @author Chris Turner */ public class Page_3 extends WebPage { private static final long serialVersionUID = 1L; /** * Construct. * */ public Page_3() { add(new Label("myname", "Hello")); } }
apache-2.0
rahulopengts/myhome
bundles/binding/org.openhab.binding.digitalstrom/src/main/java/org/openhab/binding/digitalstrom/internal/client/entity/impl/JSONDeviceImpl.java
11722
/** * Copyright (c) 2010-2015, openHAB.org and others. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.openhab.binding.digitalstrom.internal.client.entity.impl; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.openhab.binding.digitalstrom.internal.client.constants.DeviceConstants; import org.openhab.binding.digitalstrom.internal.client.constants.JSONApiResponseKeysEnum; import org.openhab.binding.digitalstrom.internal.client.constants.OutputModeEnum; import org.openhab.binding.digitalstrom.internal.client.entity.DSID; import org.openhab.binding.digitalstrom.internal.client.entity.Device; import org.openhab.binding.digitalstrom.internal.client.entity.DeviceSceneSpec; import org.openhab.binding.digitalstrom.internal.client.events.DeviceListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * @author Alexander Betker * @author Alex Maier * @since 1.3.0 */ public class JSONDeviceImpl implements Device { private static final Logger logger = LoggerFactory.getLogger(JSONDeviceImpl.class); private DSID dsid = null; private String name = null; private int zoneId = 0; private boolean isPresent = false; private boolean isOn = false; private OutputModeEnum outputMode = null; private int outputValue = 0; private int maxOutputValue = DeviceConstants.DEFAULT_MAX_OUTPUTVALUE; private int minOutputValue = 0; private int slatPosition = 0; private int maxSlatPosition = DeviceConstants.MAX_SLAT_POSITION; private int minSlatPosition = DeviceConstants.MIN_SLAT_POSITION; private int powerConsumption = 0; private int energyMeterValue = 0; private int electricMeterValue = 0; private List<Short> groupList = 
new LinkedList<Short>(); private List<DeviceListener> deviceListenerList = Collections.synchronizedList(new LinkedList<DeviceListener>()); private Map<Short, DeviceSceneSpec> sceneConfigMap = Collections.synchronizedMap(new HashMap<Short, DeviceSceneSpec>()); private Map<Short, Short> sceneOutputMap = Collections.synchronizedMap(new HashMap<Short, Short>()); public JSONDeviceImpl(JSONObject object) { if (object.get(JSONApiResponseKeysEnum.DEVICE_NAME.getKey()) != null) { this.name = object.get(JSONApiResponseKeysEnum.DEVICE_NAME.getKey()).toString(); } if (object.get(JSONApiResponseKeysEnum.DEVICE_ID.getKey()) != null) { this.dsid = new DSID(object.get(JSONApiResponseKeysEnum.DEVICE_ID.getKey()).toString()); } else if (object.get(JSONApiResponseKeysEnum.DEVICE_ID_QUERY.getKey()) != null) { this.dsid = new DSID(object.get(JSONApiResponseKeysEnum.DEVICE_ID_QUERY.getKey()).toString()); } if (object.get(JSONApiResponseKeysEnum.DEVICE_ON.getKey()) != null) { this.isOn = object.get(JSONApiResponseKeysEnum.DEVICE_ON.getKey()).toString().equals("true"); } if (object.get(JSONApiResponseKeysEnum.DEVICE_IS_PRESENT.getKey()) != null) { this.isPresent = object.get(JSONApiResponseKeysEnum.DEVICE_IS_PRESENT.getKey()).toString().equals("true"); } else if(object.get(JSONApiResponseKeysEnum.DEVICE_IS_PRESENT_QUERY.getKey()) != null) { this.isPresent = object.get(JSONApiResponseKeysEnum.DEVICE_IS_PRESENT_QUERY.getKey()).toString().equals("true"); } String zoneStr = null; if (object.get(JSONApiResponseKeysEnum.DEVICE_ZONE_ID.getKey()) != null) { zoneStr = object.get(JSONApiResponseKeysEnum.DEVICE_ZONE_ID.getKey()).toString(); } else if(object.get(JSONApiResponseKeysEnum.DEVICE_ZONE_ID_QUERY.getKey()) != null) { zoneStr = object.get(JSONApiResponseKeysEnum.DEVICE_ZONE_ID_QUERY.getKey()).toString(); } if (zoneStr != null) { try { this.zoneId = Integer.parseInt(zoneStr); } catch (java.lang.NumberFormatException e) { logger.error("NumberFormatException by parsing zoneId: "+zoneStr); } } 
if (object.get(JSONApiResponseKeysEnum.DEVICE_GROUPS.getKey()) instanceof JSONArray) { JSONArray array = (JSONArray) object.get(JSONApiResponseKeysEnum.DEVICE_GROUPS.getKey()); for (int i=0; i< array.size(); i++) { if (array.get(i) != null) { String value = array.get(i).toString(); short tmp = -1; try { tmp = Short.parseShort(value); } catch (java.lang.NumberFormatException e) { logger.error("NumberFormatException by parsing groups: "+value); } if (tmp != -1) { this.groupList.add(tmp); } } } } if (object.get(JSONApiResponseKeysEnum.DEVICE_OUTPUT_MODE.getKey()) != null) { int tmp = -1; try { tmp = Integer.parseInt(object.get(JSONApiResponseKeysEnum.DEVICE_OUTPUT_MODE.getKey()).toString()); } catch (java.lang.NumberFormatException e) { logger.error("NumberFormatException by parsing outputmode: "+object.get(JSONApiResponseKeysEnum.DEVICE_OUTPUT_MODE.getKey()).toString()); } if (tmp != -1) { if (OutputModeEnum.containsMode(tmp)) { outputMode = OutputModeEnum.getMode(tmp); } } } init(); } private void init() { if (groupList.contains((short)1)) { maxOutputValue = DeviceConstants.MAX_OUTPUT_VALUE_LIGHT; if (this.isDimmable()) { minOutputValue = DeviceConstants.MIN_DIMM_VALUE; } } else { maxOutputValue = DeviceConstants.DEFAULT_MAX_OUTPUTVALUE; minOutputValue = 0; } if(isOn) setOutputValue(DeviceConstants.DEFAULT_MAX_OUTPUTVALUE); } @Override public DSID getDSID() { return dsid; } @Override public String getName() { return name; } @Override public synchronized void setName(String name) { this.name = name; } @Override public List<Short> getGroups() { return groupList; } @Override public int getZoneId() { return zoneId; } @Override public boolean isPresent() { return isPresent; } @Override public boolean isOn() { return isOn; } @Override public void setIsOn(boolean flag) { //if device is off set power consumption and energy meter value to 0 if(flag == false){ this.powerConsumption=0; this.energyMeterValue=0; } this.isOn = flag; } @Override public synchronized void 
setOutputValue(int value) { if (value <= 0) { outputValue = 0; setIsOn(false); } else if (value > maxOutputValue) { outputValue = maxOutputValue; setIsOn(true); } else { outputValue = value; setIsOn(true); } notifyDeviceListener(dsid.getValue()); } @Override public boolean isDimmable() { if (outputMode == null) { return false; } return outputMode.equals(OutputModeEnum.DIMMED); } @Override public OutputModeEnum getOutputMode() { return outputMode; } @Override public synchronized void increase() { if (isDimmable()) { if (outputValue == maxOutputValue) { return; } if ((outputValue + getDimmStep()) > maxOutputValue) { outputValue = maxOutputValue; } else { outputValue += getDimmStep(); } setIsOn(true); notifyDeviceListener(this.dsid.getValue()); } } @Override public synchronized void decrease() { if (isDimmable()) { if (outputValue == minOutputValue) { if (outputValue == 0) { setIsOn(false); } return; } if ((outputValue - getDimmStep()) <= minOutputValue) { if (isOn) { System.out.println("Device isOn"); outputValue = minOutputValue; } if (minOutputValue == 0) { setIsOn(false); } else { if (outputValue != 0) { setIsOn(true); } } } else { outputValue -= getDimmStep(); setIsOn(true); } notifyDeviceListener(this.dsid.getValue()); } } @Override public int getOutputValue() { return outputValue; } @Override public int getMaxOutPutValue() { return maxOutputValue; } @Override public boolean isRollershutter() { if (outputMode == null) { return false; } return outputMode.equals(OutputModeEnum.UP_DOWN) || outputMode.equals(OutputModeEnum.SLAT); } @Override public int getSlatPosition() { return slatPosition; } @Override public synchronized void setSlatPosition(int position) { if (position < minSlatPosition) { slatPosition = minSlatPosition; } else if (position > this.maxSlatPosition) { slatPosition = this.maxSlatPosition; } else { this.slatPosition = position; } notifyDeviceListener(this.dsid.getValue()); } @Override public int getPowerConsumption() { return powerConsumption; } 
@Override public synchronized void setPowerConsumption(int powerConspumtion) { if (powerConspumtion < 0) { this.powerConsumption = 0; } else { this.powerConsumption = powerConspumtion; } notifyDeviceListener(this.dsid.getValue()); } @Override public int getEnergyMeterValue() { return energyMeterValue; } @Override public synchronized void setEnergyMeterValue(int value) { if (value < 0) { energyMeterValue = 0; } else { energyMeterValue = value; } notifyDeviceListener(this.dsid.getValue()); } @Override public void addDeviceListener(DeviceListener listener) { if (listener != null) { if (!this.deviceListenerList.contains(listener)) { this.deviceListenerList.add(listener); } } } @Override public void removeDeviceListener(DeviceListener listener) { if (listener != null) { if (this.deviceListenerList.contains(listener)) { this.deviceListenerList.remove(listener); } } } @Override public void notifyDeviceListener(String dsid) { for (DeviceListener listener: this.deviceListenerList) { listener.deviceUpdated(dsid); } } @Override public int getElectricMeterValue() { return electricMeterValue; } @Override public synchronized void setElectricMeterValue(int electricMeterValue) { if (electricMeterValue < 0) { this.electricMeterValue = 0; } else { this.electricMeterValue = electricMeterValue; } notifyDeviceListener(this.dsid.getValue()); } private short getDimmStep() { if (isDimmable()) { return DeviceConstants.DIMM_STEP_LIGHT; } else if (isRollershutter()) { return DeviceConstants.MOVE_STEP_ROLLERSHUTTER; } else { return DeviceConstants.DEFAULT_MOVE_STEP; } } @Override public int getMaxSlatPosition() { return maxSlatPosition; } @Override public int getMinSlatPosition() { return minSlatPosition; } @Override public short getSceneOutputValue(short sceneId) { synchronized(sceneOutputMap) { if (sceneOutputMap.containsKey(sceneId)) { return sceneOutputMap.get(sceneId); } } return -1; } @Override public void setSceneOutputValue(short sceneId, short value) { synchronized(sceneOutputMap) { 
sceneOutputMap.put(sceneId, value); } } @Override public void addSceneConfig(short sceneId, DeviceSceneSpec sceneSpec) { if (sceneSpec != null) { synchronized(sceneConfigMap) { sceneConfigMap.put(sceneId, sceneSpec); } } } @Override public boolean doIgnoreScene(short sceneId) { synchronized(sceneConfigMap) { if (this.sceneConfigMap.containsKey(sceneId)) { return this.sceneConfigMap.get(sceneId).isDontCare(); } } return false; } @Override public boolean containsSceneConfig(short sceneId) { synchronized(sceneConfigMap) { return sceneConfigMap.containsKey(sceneId); } } @Override public boolean equals(Object obj) { if (obj instanceof Device) { Device device = (Device)obj; return device.getDSID().equals(this.getDSID()); } return false; } }
epl-1.0
sujeet14108/teammates
src/test/java/teammates/test/cases/automated/FeedbackSessionClosedRemindersActionTest.java
4050
package teammates.test.cases.automated;

import java.util.List;
import java.util.Map;

import org.testng.annotations.Test;

import teammates.common.datatransfer.attributes.FeedbackSessionAttributes;
import teammates.common.util.Const;
import teammates.common.util.Const.ParamsNames;
import teammates.common.util.EmailType;
import teammates.common.util.TaskWrapper;
import teammates.common.util.TimeHelper;
import teammates.logic.core.CoursesLogic;
import teammates.logic.core.FeedbackSessionsLogic;
import teammates.test.driver.TimeHelperExtension;
import teammates.ui.automated.FeedbackSessionClosedRemindersAction;

/**
 * SUT: {@link FeedbackSessionClosedRemindersAction}.
 *
 * <p>Verifies that "session closed" reminder emails are queued only for sessions
 * that closed recently, have closing emails enabled, are past their grace period,
 * and have not already had closed emails sent.
 */
public class FeedbackSessionClosedRemindersActionTest extends BaseAutomatedActionTest {

    private static final CoursesLogic coursesLogic = CoursesLogic.inst();
    private static final FeedbackSessionsLogic fsLogic = FeedbackSessionsLogic.inst();

    /** Returns the URI of the automated action under test. */
    @Override
    protected String getActionUri() {
        return Const.ActionURIs.AUTOMATED_FEEDBACK_CLOSED_REMINDERS;
    }

    /**
     * Single test method covering all scenarios, since each section mutates the
     * shared data bundle and builds on the previous state.
     */
    @Test
    public void allTests() throws Exception {

        // The unmodified data bundle has no recently-closed sessions, so no
        // reminder tasks should be queued.
        ______TS("default state of typical data bundle: 0 sessions closed recently");

        FeedbackSessionClosedRemindersAction action = getAction();
        action.execute();

        verifyNoTasksAdded(action);

        ______TS("1 session closed recently, 1 session closed recently with disabled closed reminder, "
                 + "1 session closed recently but still in grace period");

        // Session is closed recently
        FeedbackSessionAttributes session1 = dataBundle.feedbackSessions.get("session1InCourse1");
        session1.setTimeZone(0);
        session1.setStartTime(TimeHelper.getDateOffsetToCurrentTime(-2));
        session1.setEndTime(TimeHelperExtension.getHoursOffsetToCurrentTime(-1));
        fsLogic.updateFeedbackSession(session1);
        verifyPresentInDatastore(session1);

        // Ditto, but with disabled closed reminder
        FeedbackSessionAttributes session2 = dataBundle.feedbackSessions.get("session2InCourse1");
        session2.setTimeZone(0);
        session2.setStartTime(TimeHelper.getDateOffsetToCurrentTime(-2));
        session2.setEndTime(TimeHelperExtension.getHoursOffsetToCurrentTime(-1));
        session2.setClosingEmailEnabled(false);
        fsLogic.updateFeedbackSession(session2);
        verifyPresentInDatastore(session2);

        // Still in grace period; closed reminder should not be sent
        FeedbackSessionAttributes session3 = dataBundle.feedbackSessions.get("gracePeriodSession");
        session3.setTimeZone(0);
        session3.setStartTime(TimeHelper.getDateOffsetToCurrentTime(-2));
        session3.setEndTime(TimeHelper.getDateOffsetToCurrentTime(0));
        fsLogic.updateFeedbackSession(session3);
        verifyPresentInDatastore(session3);

        action = getAction();
        action.execute();

        // Only session1 qualifies for reminders, so one email task per
        // course member is expected.
        // 5 students and 5 instructors in course1
        verifySpecifiedTasksAdded(action, Const.TaskQueue.SEND_EMAIL_QUEUE_NAME, 10);

        // Every queued email must carry the "feedback session closed" subject
        // for session1 (the only session that should trigger reminders).
        String courseName = coursesLogic.getCourse(session1.getCourseId()).getName();
        List<TaskWrapper> tasksAdded = action.getTaskQueuer().getTasksAdded();
        for (TaskWrapper task : tasksAdded) {
            Map<String, String[]> paramMap = task.getParamMap();
            assertEquals(String.format(EmailType.FEEDBACK_CLOSED.getSubject(), courseName,
                                       session1.getSessionName()),
                         paramMap.get(ParamsNames.EMAIL_SUBJECT)[0]);
        }

        // Once the closed email has been marked as sent, re-running the action
        // must not queue duplicates.
        ______TS("1 session closed recently with closed emails sent");

        session1.setSentClosedEmail(true);
        fsLogic.updateFeedbackSession(session1);

        action = getAction();
        action.execute();

        verifyNoTasksAdded(action);
    }

    /** Obtains the action object via the GAE simulation; params are unused. */
    @Override
    protected FeedbackSessionClosedRemindersAction getAction(String... params) {
        return (FeedbackSessionClosedRemindersAction) gaeSimulation.getAutomatedActionObject(getActionUri());
    }
}
gpl-2.0
asedunov/intellij-community
xml/xml-analysis-impl/src/com/intellij/codeInsight/daemon/impl/analysis/RemoveTagIntentionFix.java
2521
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInsight.daemon.impl.analysis; import com.intellij.codeInsight.daemon.XmlErrorMessages; import com.intellij.codeInspection.LocalQuickFixAndIntentionActionOnPsiElement; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.project.Project; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.xml.XmlTag; import org.jetbrains.annotations.Nls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * @author Pavel.Dolgov */ public class RemoveTagIntentionFix extends LocalQuickFixAndIntentionActionOnPsiElement { private final String myTagName; public RemoveTagIntentionFix(final String name, @NotNull final XmlTag tag) { super(tag); myTagName = name; } @NotNull @Override public String getText() { return XmlErrorMessages.message("remove.tag.quickfix.text", myTagName); } @Nls @NotNull @Override public String getFamilyName() { return XmlErrorMessages.message("remove.tag.quickfix.family"); } @Override public void invoke(@NotNull Project project, @NotNull PsiFile file, @Nullable("is null when called from inspection") Editor editor, @NotNull PsiElement startElement, @NotNull PsiElement endElement) { final XmlTag next = editor != null ? PsiTreeUtil.getNextSiblingOfType(startElement, XmlTag.class) : null; final XmlTag prev = editor != null ? 
PsiTreeUtil.getPrevSiblingOfType(startElement, XmlTag.class) : null; startElement.delete(); if (editor != null) { if (next != null) { editor.getCaretModel().moveToOffset(next.getTextRange().getStartOffset()); } else if (prev != null) { editor.getCaretModel().moveToOffset(prev.getTextRange().getEndOffset()); } } } }
apache-2.0
profjrr/zaproxy
src/org/zaproxy/zap/extension/spider/SpiderScan.java
9333
/*
 * Zed Attack Proxy (ZAP) and its related class files.
 *
 * ZAP is an HTTP/HTTPS proxy for assessing web application security.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Note that this extension and the other classes in this package are heavily
 * based on the original Paros ExtensionSpider!
 */
package org.zaproxy.zap.extension.spider;

import java.util.ArrayList;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import javax.swing.table.TableModel;

import org.apache.commons.httpclient.URI;
import org.parosproxy.paros.network.HttpMessage;
import org.parosproxy.paros.network.HttpRequestHeader;
import org.parosproxy.paros.network.HttpResponseHeader;
import org.zaproxy.zap.model.GenericScanner2;
import org.zaproxy.zap.model.ScanListenner;
import org.zaproxy.zap.model.ScanListenner2;
import org.zaproxy.zap.model.Target;
import org.zaproxy.zap.spider.SpiderListener;
import org.zaproxy.zap.spider.SpiderParam;
import org.zaproxy.zap.spider.filters.FetchFilter;
import org.zaproxy.zap.spider.filters.FetchFilter.FetchStatus;
import org.zaproxy.zap.spider.filters.ParseFilter;
import org.zaproxy.zap.spider.parser.SpiderParser;
import org.zaproxy.zap.users.User;

/**
 * Represents one spider scan: it wraps a {@link SpiderThread}, tracks the scan's
 * lifecycle state (not started / running / paused / finished), and accumulates
 * the URIs and resources found. State transitions are guarded by a lock; result
 * collections are synchronized wrappers (callers must synchronize on them when
 * iterating).
 */
public class SpiderScan implements ScanListenner, SpiderListener, GenericScanner2 {

    // Lifecycle states of the scan; transitions are guarded by 'lock'.
    private static enum State {
        NOT_STARTED,
        RUNNING,
        PAUSED,
        FINISHED
    };

    // Fetch statuses counted as in-scope results.
    private static final EnumSet<FetchStatus> FETCH_STATUS_IN_SCOPE = EnumSet.of(FetchStatus.VALID, FetchStatus.SEED);

    // Fetch statuses counted as out-of-scope results.
    private static final EnumSet<FetchStatus> FETCH_STATUS_OUT_OF_SCOPE = EnumSet.of(
            FetchStatus.OUT_OF_SCOPE,
            FetchStatus.OUT_OF_CONTEXT,
            FetchStatus.USER_RULES);

    // Guards all reads/writes of 'state'.
    private final Lock lock;

    private int scanId;

    private String displayName = "";

    // In-scope URIs found so far (synchronized set; synchronize on it to iterate).
    private Set<String> foundURIs;

    // Resources (request/response summaries) found so far (synchronized list).
    private List<SpiderResource> resourcesFound;

    // Out-of-scope URIs found so far (synchronized set).
    private Set<String> foundURIsOutOfScope;

    private SpiderThread spiderThread = null;

    private State state;

    // Percentage complete, 0-100. Written from spider callbacks, read by callers.
    // NOTE(review): accessed without synchronization/volatile — presumably benign
    // for a progress indicator, but worth confirming.
    private int progress;

    private ScanListenner2 listener = null;

    /**
     * Creates a new spider scan and configures (but does not start) the
     * underlying spider thread from the given target and user.
     *
     * @param extension the owning spider extension
     * @param spiderParams the spider configuration parameters
     * @param target the target to scan (start node, context, scope/recursion flags)
     * @param spiderURI the URI to start spidering from
     * @param scanUser the user to scan as, may be {@code null}
     * @param scanId the ID of this scan
     */
    public SpiderScan(ExtensionSpider extension, SpiderParam spiderParams, Target target, URI spiderURI, User scanUser,
            int scanId) {
        lock = new ReentrantLock();
        this.scanId = scanId;

        foundURIs = Collections.synchronizedSet(new HashSet<String>());
        resourcesFound = Collections.synchronizedList(new ArrayList<SpiderResource>());
        foundURIsOutOfScope = Collections.synchronizedSet(new HashSet<String>());

        state = State.NOT_STARTED;

        spiderThread = new SpiderThread(extension, spiderParams, "SpiderApi-" + scanId, this);

        spiderThread.setStartURI(spiderURI);
        spiderThread.setStartNode(target.getStartNode());
        spiderThread.setScanContext(target.getContext());
        spiderThread.setScanAsUser(scanUser);
        spiderThread.setJustScanInScope(target.isInScopeOnly());
        spiderThread.setScanChildren(target.isRecurse());
    }

    /**
     * Returns the ID of the scan.
     *
     * @return the ID of the scan
     */
    @Override
    public int getScanId() {
        return scanId;
    }

    /**
     * Returns the {@code String} representation of the scan state (not started, running, paused or finished).
     *
     * @return the {@code String} representation of the scan state.
     */
    public String getState() {
        lock.lock();
        try {
            return state.toString();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the progress of the scan, an integer between 0 and 100.
     *
     * @return the progress of the scan.
     */
    @Override
    public int getProgress() {
        return progress;
    }

    /**
     * Starts the scan.
     * <p>
     * The call to this method has no effect if the scan was already started.
     * </p>
     */
    public void start() {
        lock.lock();
        try {
            if (State.NOT_STARTED.equals(state)) {
                spiderThread.addSpiderListener(this);
                spiderThread.start();
                state = State.RUNNING;
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Pauses the scan.
     * <p>
     * The call to this method has no effect if the scan is not running.
     * </p>
     */
    @Override
    public void pauseScan() {
        lock.lock();
        try {
            if (State.RUNNING.equals(state)) {
                spiderThread.pauseScan();
                state = State.PAUSED;
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Resumes the scan.
     * <p>
     * The call to this method has no effect if the scan is not paused.
     * </p>
     */
    @Override
    public void resumeScan() {
        lock.lock();
        try {
            if (State.PAUSED.equals(state)) {
                spiderThread.resumeScan();
                state = State.RUNNING;
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Stops the scan.
     * <p>
     * The call to this method has no effect if the scan was not yet started or has already finished.
     * </p>
     */
    @Override
    public void stopScan() {
        lock.lock();
        try {
            if (!State.NOT_STARTED.equals(state) && !State.FINISHED.equals(state)) {
                spiderThread.stopScan();
                state = State.FINISHED;
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the URLs found during the scan.
     * <p>
     * <strong>Note:</strong> Iterations must be {@code synchronized} on returned object. Failing to do so might result in
     * {@code ConcurrentModificationException}.
     * </p>
     *
     * @return the URLs found during the scan
     * @see ConcurrentModificationException
     */
    public Set<String> getResults() {
        return foundURIs;
    }

    /**
     * Returns the resources found during the scan.
     * <p>
     * <strong>Note:</strong> Iterations must be {@code synchronized} on returned object. Failing to do so might result in
     * {@code ConcurrentModificationException}.
     * </p>
     *
     * @return the resources found during the scan
     * @see ConcurrentModificationException
     */
    public List<SpiderResource> getResourcesFound() {
        return resourcesFound;
    }

    /**
     * Returns the URLs, out of scope, found during the scan.
     * <p>
     * <strong>Note:</strong> Iterations must be {@code synchronized} on returned object. Failing to do so might result in
     * {@code ConcurrentModificationException}.
     * </p>
     *
     * @return the URLs, out of scope, found during the scan
     * @see ConcurrentModificationException
     */
    public Set<String> getResultsOutOfScope() {
        return foundURIsOutOfScope;
    }

    /**
     * Spider callback: records a summary of the fetched message (history ID,
     * method, URI, status code/reason) in the resources list.
     */
    @Override
    public void readURI(HttpMessage msg) {
        HttpRequestHeader requestHeader = msg.getRequestHeader();
        HttpResponseHeader responseHeader = msg.getResponseHeader();
        resourcesFound.add(new SpiderResource(
                msg.getHistoryRef().getHistoryId(),
                requestHeader.getMethod(),
                requestHeader.getURI().toString(),
                responseHeader.getStatusCode(),
                responseHeader.getReasonPhrase()));
    }

    /**
     * Spider callback: marks the scan finished and notifies the scan listener.
     * The listener is notified outside the lock to avoid calling alien code
     * while holding it.
     */
    @Override
    public void spiderComplete(boolean successful) {
        lock.lock();
        try {
            state = State.FINISHED;
        } finally {
            lock.unlock();
        }
        if (listener != null) {
            listener.scanFinshed(this.getScanId(), this.getDisplayName());
        }
    }

    /**
     * Spider callback: updates the progress percentage and forwards it to the
     * scan listener (out of a maximum of 100).
     */
    @Override
    public void spiderProgress(int percentageComplete, int numberCrawled, int numberToCrawl) {
        this.progress = percentageComplete;
        if (listener != null) {
            listener.scanProgress(this.getScanId(), this.getDisplayName(), percentageComplete, 100);
        }
    }

    /**
     * Spider callback: files the URI into the in-scope or out-of-scope result
     * set based on its fetch status; other statuses are ignored.
     */
    @Override
    public void foundURI(String uri, String method, FetchStatus status) {
        if (FETCH_STATUS_IN_SCOPE.contains(status)) {
            foundURIs.add(uri);
        } else if (FETCH_STATUS_OUT_OF_SCOPE.contains(status)) {
            foundURIsOutOfScope.add(uri);
        }
    }

    // Required by an implemented interface but unused; the spider runs in its
    // own thread (SpiderThread), not via this method.
    @Override
    public void run() {
        // TODO Auto-generated method stub
    }

    @Override
    public void setScanId(int id) {
        this.scanId = id;
    }

    @Override
    public void setDisplayName(String name) {
        this.displayName = name;
    }

    @Override
    public String getDisplayName() {
        return this.displayName;
    }

    /** Delegates to the underlying spider thread. */
    @Override
    public boolean isStopped() {
        return this.spiderThread.isStopped();
    }

    /** The progress scale is a fixed percentage, so the maximum is always 100. */
    @Override
    public int getMaximum() {
        return 100;
    }

    /** Delegates to the underlying spider thread. */
    @Override
    public boolean isPaused() {
        return this.spiderThread.isPaused();
    }

    /** Delegates to the underlying spider thread. */
    @Override
    public boolean isRunning() {
        return this.spiderThread.isRunning();
    }

    // ScanListenner callback (note: "Finshed" spelling matches the interface);
    // treats a per-host finish as overall completion.
    @Override
    public void scanFinshed(String host) {
        this.spiderComplete(true);
    }

    // ScanListenner callback; per-host progress is not tracked separately.
    @Override
    public void scanProgress(String host, int progress, int maximum) {
    }

    /** Returns the table model backing the results view. */
    public TableModel getResultsTableModel() {
        return this.spiderThread.getResultsTableModel();
    }

    /** Sets the listener notified of scan progress and completion. */
    public void setListener(ScanListenner2 listener) {
        this.listener = listener;
    }

    /** Forwards custom spider parsers to the underlying spider thread. */
    public void setCustomSpiderParsers(List<SpiderParser> customSpiderParsers) {
        spiderThread.setCustomSpiderParsers(customSpiderParsers);
    }

    /** Forwards custom fetch filters to the underlying spider thread. */
    public void setCustomFetchFilters(List<FetchFilter> customFetchFilters) {
        spiderThread.setCustomFetchFilters(customFetchFilters);
    }

    /** Forwards custom parse filters to the underlying spider thread. */
    public void setCustomParseFilters(List<ParseFilter> customParseFilters) {
        spiderThread.setCustomParseFilters(customParseFilters);
    }
}
apache-2.0
Miracle121/quickdic-dictionary.dictionary
jars/icu4j-52_1/main/classes/core/src/com/ibm/icu/impl/Grego.java
7708
/**
 *******************************************************************************
 * Copyright (C) 2003-2008, International Business Machines Corporation and
 * others. All Rights Reserved.
 *******************************************************************************
 * Partial port from ICU4C's Grego class in i18n/gregoimp.h.
 *
 * Methods ported, or moved here from OlsonTimeZone, initially
 * for work on Jitterbug 5470:
 *     tzdata2006n Brazil incorrect fall-back date 2009-mar-01
 * Only the methods necessary for that work are provided - this is not a full
 * port of ICU4C's Grego class (yet).
 *
 * These utilities are used by both OlsonTimeZone and SimpleTimeZone.
 */
package com.ibm.icu.impl;

import com.ibm.icu.util.Calendar;

/**
 * A utility class providing proleptic Gregorian calendar functions
 * used by time zone and calendar code.  Do not instantiate.
 *
 * Note:  Unlike GregorianCalendar, all computations performed by this
 * class occur in the pure proleptic GregorianCalendar.
 */
public class Grego {

    // Max/min milliseconds
    public static final long MIN_MILLIS = -184303902528000000L;
    public static final long MAX_MILLIS = 183882168921600000L;

    // Millisecond unit conversion factors.
    public static final int MILLIS_PER_SECOND = 1000;
    public static final int MILLIS_PER_MINUTE = 60*MILLIS_PER_SECOND;
    public static final int MILLIS_PER_HOUR = 60*MILLIS_PER_MINUTE;
    public static final int MILLIS_PER_DAY = 24*MILLIS_PER_HOUR;

    //  January 1, 1 CE Gregorian (Julian day number)
    private static final int JULIAN_1_CE = 1721426;

    //  January 1, 1970 CE Gregorian (Julian day number; the Unix epoch)
    private static final int JULIAN_1970_CE = 2440588;

    // Days per month; first 12 entries are for common years, next 12 for leap years.
    private static final int[] MONTH_LENGTH = new int[] {
        31,28,31,30,31,30,31,31,30,31,30,31,
        31,29,31,30,31,30,31,31,30,31,30,31
    };

    // Cumulative days before each month; first 12 entries for common years,
    // next 12 for leap years.
    private static final int[] DAYS_BEFORE = new int[] {
        0,31,59,90,120,151,181,212,243,273,304,334,
        0,31,60,91,121,152,182,213,244,274,305,335
    };

    /**
     * Return true if the given year is a leap year.
     * @param year Gregorian year, with 0 == 1 BCE, -1 == 2 BCE, etc.
     * @return true if the year is a leap year
     */
    public static final boolean isLeapYear(int year) {
        // year&0x3 == year%4
        return ((year&0x3) == 0) && ((year%100 != 0) || (year%400 == 0));
    }

    /**
     * Return the number of days in the given month.
     * @param year Gregorian year, with 0 == 1 BCE, -1 == 2 BCE, etc.
     * @param month 0-based month, with 0==Jan
     * @return the number of days in the given month
     */
    public static final int monthLength(int year, int month) {
        return MONTH_LENGTH[month + (isLeapYear(year) ? 12 : 0)];
    }

    /**
     * Return the length of a previous month of the Gregorian calendar.
     * @param year Gregorian year, with 0 == 1 BCE, -1 == 2 BCE, etc.
     * @param month 0-based month, with 0==Jan
     * @return the number of days in the month previous to the given month
     */
    public static final int previousMonthLength(int year, int month) {
        // Note: for month 0 (Jan) this returns 31 (December of the previous year).
        return (month > 0) ? monthLength(year, month-1) : 31;
    }

    /**
     * Convert a year, month, and day-of-month, given in the proleptic
     * Gregorian calendar, to 1970 epoch days.
     * @param year Gregorian year, with 0 == 1 BCE, -1 == 2 BCE, etc.
     * @param month 0-based month, with 0==Jan
     * @param dom 1-based day of month
     * @return the day number, with day 0 == Jan 1 1970
     */
    public static long fieldsToDay(int year, int month, int dom) {
        int y = year - 1;
        // Build a Julian day number, then shift to the 1970 epoch.
        long julian =
            365 * y + floorDivide(y, 4) + (JULIAN_1_CE - 3) +    // Julian cal
            floorDivide(y, 400) - floorDivide(y, 100) + 2 +   // => Gregorian cal
            DAYS_BEFORE[month + (isLeapYear(year) ? 12 : 0)] + dom; // => month/dom
        return julian - JULIAN_1970_CE; // JD => epoch day
    }

    /**
     * Return the day of week on the 1970-epoch day
     * @param day the 1970-epoch day (integral value)
     * @return the day of week (Calendar constants: SUNDAY == 1 ... SATURDAY == 7)
     */
    public static int dayOfWeek(long day) {
        long[] remainder = new long[1];
        // Jan 1 1970 (epoch day 0) was a Thursday; offset so remainder 0 maps to it.
        floorDivide(day + Calendar.THURSDAY, 7, remainder);
        int dayOfWeek = (int)remainder[0];
        dayOfWeek = (dayOfWeek == 0) ? 7 : dayOfWeek;
        return dayOfWeek;
    }

    /**
     * Convert a 1970-epoch day number to calendar fields.
     *
     * fields[0] : year
     * fields[1] : 0-based month
     * fields[2] : 1-based day of month
     * fields[3] : day of week (SUNDAY == 1 ... SATURDAY == 7)
     * fields[4] : 1-based day of year
     *
     * @param day the 1970-epoch day
     * @param fields result array of length >= 5, allocated if null or too short
     * @return the populated fields array
     */
    public static int[] dayToFields(long day, int[] fields) {
        if (fields == null || fields.length < 5) {
            fields = new int[5];
        }
        // Convert from 1970 CE epoch to 1 CE epoch (Gregorian calendar)
        day += JULIAN_1970_CE - JULIAN_1_CE;

        // Decompose into 400-year, 100-year, 4-year and 1-year cycles.
        long[] rem = new long[1];
        long n400 = floorDivide(day, 146097, rem);
        long n100 = floorDivide(rem[0], 36524, rem);
        long n4 = floorDivide(rem[0], 1461, rem);
        long n1 = floorDivide(rem[0], 365, rem);

        int year = (int)(400 * n400 + 100 * n100 + 4 * n4 + n1);
        int dayOfYear = (int)rem[0];
        if (n100 == 4 || n1 == 4) {
            dayOfYear = 365;    // Dec 31 at end of 4- or 400-yr cycle
        }
        else {
            ++year;
        }

        boolean isLeap = isLeapYear(year);
        int correction = 0;
        int march1 = isLeap ? 60 : 59;  // zero-based DOY for March 1
        if (dayOfYear >= march1) {
            correction = isLeap ? 1 : 2;
        }
        int month = (12 * (dayOfYear + correction) + 6) / 367;  // zero-based month
        int dayOfMonth = dayOfYear - DAYS_BEFORE[isLeap ? month + 12 : month] + 1; // one-based DOM
        int dayOfWeek = (int)((day + 2) % 7);  // day 0 is Monday(2)
        if (dayOfWeek < 1 /* Sunday */) {
            dayOfWeek += 7;
        }
        dayOfYear++; // 1-based day of year

        fields[0] = year;
        fields[1] = month;
        fields[2] = dayOfMonth;
        fields[3] = dayOfWeek;
        fields[4] = dayOfYear;

        return fields;
    }

    /*
     * Convert long time to date/time fields
     *
     * result[0] : year
     * result[1] : month
     * result[2] : dayOfMonth
     * result[3] : dayOfWeek
     * result[4] : dayOfYear
     * result[5] : millisecond in day
     */
    public static int[] timeToFields(long time, int[] fields) {
        if (fields == null || fields.length < 6) {
            fields = new int[6];
        }
        long[] remainder = new long[1];
        long day = floorDivide(time, 24*60*60*1000 /* milliseconds per day */, remainder);
        dayToFields(day, fields);
        fields[5] = (int)remainder[0];
        return fields;
    }

    /**
     * Floor division that rounds toward negative infinity (unlike Java's '/',
     * which truncates toward zero for negative numerators).
     */
    public static long floorDivide(long numerator, long denominator) {
        // We do this computation in order to handle
        // a numerator of Long.MIN_VALUE correctly
        return (numerator >= 0) ?
            numerator / denominator :
            ((numerator + 1) / denominator) - 1;
    }

    // Floor division returning both quotient and remainder; the remainder is
    // always non-negative for a positive denominator.
    private static long floorDivide(long numerator, long denominator, long[] remainder) {
        if (numerator >= 0) {
            remainder[0] = numerator % denominator;
            return numerator / denominator;
        }
        long quotient = ((numerator + 1) / denominator) - 1;
        remainder[0] = numerator - (quotient * denominator);
        return quotient;
    }

    /*
     * Returns the ordinal number for the specified day of week in the month.
     * The valid return value is 1, 2, 3, 4 or -1.
     */
    public static int getDayOfWeekInMonth(int year, int month, int dayOfMonth) {
        int weekInMonth = (dayOfMonth + 6)/7;
        if (weekInMonth == 4) {
            // A 4th occurrence is also the last only if a 5th does not fit.
            if (dayOfMonth + 7 > monthLength(year, month)) {
                weekInMonth = -1;
            }
        }
        else if (weekInMonth == 5) {
            // A 5th occurrence is always the last of its kind in the month.
            weekInMonth = -1;
        }
        return weekInMonth;
    }
}
apache-2.0
allancth/camel
components/camel-spring-boot/src/main/java/org/apache/camel/spring/boot/ComponentConfigurationProperties.java
1209
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.spring.boot;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Spring Boot configuration properties bound to the {@code camel.component}
 * prefix, controlling whether Camel component auto-configuration is enabled.
 */
@ConfigurationProperties(prefix = "camel.component")
public class ComponentConfigurationProperties {

    // Whether component auto-configuration is enabled; on by default
    // (maps to the 'camel.component.enabled' property).
    private boolean enabled = true;

    /** Returns whether component auto-configuration is enabled. */
    public boolean isEnabled() {
        return enabled;
    }

    /** Enables or disables component auto-configuration. */
    public void setEnabled(boolean enabled) {
        this.enabled = enabled;
    }
}
apache-2.0
Samernieve/EnderIO
src/api/java/appeng/api/features/IWorldGen.java
1649
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2013 AlgorithmX2
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

package appeng.api.features;


import net.minecraft.world.World;
import net.minecraft.world.WorldProvider;


/**
 * Controls where the mod's world generation (ores, meteorites) is allowed to run,
 * by world-provider class or by dimension ID.
 */
public interface IWorldGen
{

	/**
	 * Disables the given world-gen type for every world backed by the given
	 * {@link WorldProvider} class.
	 */
	void disableWorldGenForProviderID( WorldGenType type, Class<? extends WorldProvider> provider );

	/**
	 * Enables the given world-gen type for the dimension with the given ID.
	 */
	void enableWorldGenForDimension( WorldGenType type, int dimID );

	/**
	 * Disables the given world-gen type for the dimension with the given ID.
	 */
	void disableWorldGenForDimension( WorldGenType type, int dimID );

	/**
	 * Returns whether the given world-gen type is enabled in the given world.
	 * NOTE(review): presumably combines the provider- and dimension-level
	 * settings above — confirm against the implementation.
	 */
	boolean isWorldGenEnabled( WorldGenType type, World w );

	/** The kinds of world generation the mod performs. */
	enum WorldGenType
	{
		CertusQuartz, ChargedCertusQuartz, Meteorites
	}
}
unlicense
robin13/elasticsearch
x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java
2091
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.ilm.ExplainLifecycleRequest; import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestExplainLifecycleAction extends BaseRestHandler { @Override public List<Route> routes() { return List.of(new Route(GET, "/{index}/_ilm/explain")); } @Override public String getName() { return "ilm_explain_action"; } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); ExplainLifecycleRequest explainLifecycleRequest = new ExplainLifecycleRequest(); explainLifecycleRequest.indices(indexes); explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); explainLifecycleRequest.onlyManaged(restRequest.paramAsBoolean("only_managed", false)); explainLifecycleRequest.onlyErrors(restRequest.paramAsBoolean("only_errors", false)); String masterNodeTimeout = restRequest.param("master_timeout"); if (masterNodeTimeout != null) { explainLifecycleRequest.masterNodeTimeout(masterNodeTimeout); } return channel -> client.execute(ExplainLifecycleAction.INSTANCE, explainLifecycleRequest, new RestToXContentListener<>(channel)); } }
apache-2.0
mgherghe/gateway
transport/wsn/src/test/java/org/kaazing/gateway/transport/wsn/specification/ws/acceptor/ControlIT.java
5573
/**
 * Copyright 2007-2016, Kaazing Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kaazing.gateway.transport.wsn.specification.ws.acceptor;

import static org.kaazing.test.util.ITUtil.createRuleChain;

import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.kaazing.gateway.server.test.GatewayRule;
import org.kaazing.gateway.server.test.config.GatewayConfiguration;
import org.kaazing.gateway.server.test.config.builder.GatewayConfigurationBuilder;
import org.kaazing.k3po.junit.annotation.Specification;
import org.kaazing.k3po.junit.rules.K3poRule;

/**
 * RFC-6455, section 5.5 "Control Frames"
 *
 * Specification tests for WebSocket control frames (close, ping, pong) against
 * the gateway acceptor. Each test drives a K3po script that plays the client
 * side of the conversation; the gateway under test hosts an "echo" service at
 * ws://localhost:8080/echo. Test bodies only call {@code k3po.finish()} — all
 * protocol-level assertions live in the referenced scripts.
 */
public class ControlIT {

    // Resolves @Specification script names relative to the ws/control scripts directory.
    private final K3poRule k3po = new K3poRule().setScriptRoot("org/kaazing/specification/ws/control");

    // Starts an embedded gateway with a single echo service for every test.
    private final GatewayRule gateway = new GatewayRule() {
        {
            // @formatter:off
            GatewayConfiguration configuration =
                    new GatewayConfigurationBuilder()
                        .service()
                            .accept("ws://localhost:8080/echo")
                            .type("echo")
                            .crossOrigin()
                                .allowOrigin("http://localhost:8001")
                            .done()
                        .done()
                    .done();
            // @formatter:on
            init(configuration);
        }
    };

    // createRuleChain orders the rules so the gateway is up before K3po runs.
    @Rule
    public TestRule chain = createRuleChain(gateway, k3po);

    // --- Close frames: payload up to 125 bytes must be echoed; 126+ must fail the connection ---

    @Test
    @Specification({
        "client.send.close.payload.length.0/handshake.request.and.frame"
        })
    public void shouldEchoClientCloseFrameWithEmptyPayload() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.close.payload.length.1/handshake.request.and.frame"
        })
    public void shouldEchoClientCloseFrameWithPayloadSize1() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.close.payload.length.125/handshake.request.and.frame"
        })
    public void shouldEchoClientCloseFrameWithPayload() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.close.payload.length.126/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendCloseFrameWithPayloadTooLong() throws Exception {
        k3po.finish();
    }

    // --- Ping frames: gateway must answer with pong; oversized payload must fail the connection ---

    @Test
    @Specification({
        "client.send.ping.payload.length.0/handshake.request.and.frame"
        })
    public void shouldPongClientPingFrameWithEmptyPayload() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.ping.payload.length.125/handshake.request.and.frame"
        })
    public void shouldPongClientPingFrameWithPayload() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.ping.payload.length.126/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendPingFrameWithPayloadTooLong() throws Exception {
        k3po.finish();
    }

    // --- Pong frames: unsolicited pongs are received silently; oversized payload must fail ---

    // NOTE(review): disabled pending investigation of the timeout — presumably the
    // gateway never completes the script; confirm before re-enabling.
    @Test
    @Ignore("Timeout error")
    @Specification({
        "client.send.pong.payload.length.0/handshake.request.and.frame"
        })
    public void shouldReceiveClientPongFrameWithEmptyPayload() throws Exception {
        k3po.finish();
    }

    // NOTE(review): disabled pending investigation of the timeout (see above).
    @Test
    @Ignore("Timeout error")
    @Specification({
        "client.send.pong.payload.length.125/handshake.request.and.frame"
        })
    public void shouldReceiveClientPongFrameWithPayload() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.pong.payload.length.126/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendPongFrameWithPayloadTooLong() throws Exception {
        k3po.finish();
    }

    // --- Reserved control opcodes 0x0B-0x0F: any use must fail the WebSocket connection ---

    @Test
    @Specification({
        "client.send.opcode.0x0b/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendOpcode11Frame() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.opcode.0x0c/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendOpcode12Frame() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.opcode.0x0d/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendOpcode13Frame() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.opcode.0x0e/handshake.request.and.frame"
        })
    public void shouldFailWebSocketConnectionWhenClientSendOpcode14Frame() throws Exception {
        k3po.finish();
    }

    @Test
    @Specification({
        "client.send.opcode.0x0f/handshake.request.and.frame"})
    public void shouldFailWebSocketConnectionWhenClientSendOpcode15Frame() throws Exception {
        k3po.finish();
    }
}
apache-2.0
rokn/Count_Words_2015
testing/openjdk2/jaxp/src/com/sun/org/apache/xerces/internal/jaxp/validation/XMLSchemaValidatorComponentManager.java
23097
/* * reserved comment block * DO NOT REMOVE OR ALTER! */ /* * Copyright 2005 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sun.org.apache.xerces.internal.jaxp.validation; import java.util.HashMap; import java.util.Locale; import java.util.Iterator; import java.util.Map; import javax.xml.XMLConstants; import com.sun.org.apache.xerces.internal.impl.Constants; import com.sun.org.apache.xerces.internal.impl.XMLEntityManager; import com.sun.org.apache.xerces.internal.impl.XMLErrorReporter; import com.sun.org.apache.xerces.internal.impl.validation.ValidationManager; import com.sun.org.apache.xerces.internal.impl.xs.XMLSchemaValidator; import com.sun.org.apache.xerces.internal.impl.xs.XSMessageFormatter; import com.sun.org.apache.xerces.internal.util.DOMEntityResolverWrapper; import com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper; import com.sun.org.apache.xerces.internal.util.FeatureState; import com.sun.org.apache.xerces.internal.util.NamespaceSupport; import com.sun.org.apache.xerces.internal.util.ParserConfigurationSettings; import com.sun.org.apache.xerces.internal.util.PropertyState; import com.sun.org.apache.xerces.internal.util.Status; import com.sun.org.apache.xerces.internal.util.SymbolTable; import com.sun.org.apache.xerces.internal.utils.XMLSecurityPropertyManager; import com.sun.org.apache.xerces.internal.utils.XMLSecurityManager; import com.sun.org.apache.xerces.internal.xni.NamespaceContext; import 
com.sun.org.apache.xerces.internal.xni.XNIException; import com.sun.org.apache.xerces.internal.xni.parser.XMLComponent; import com.sun.org.apache.xerces.internal.xni.parser.XMLComponentManager; import com.sun.org.apache.xerces.internal.xni.parser.XMLConfigurationException; import org.w3c.dom.ls.LSResourceResolver; import org.xml.sax.ErrorHandler; /** * <p>An implementation of XMLComponentManager for a schema validator.</p> * * @author Michael Glavassevich, IBM * @version $Id: XMLSchemaValidatorComponentManager.java,v 1.9 2010-11-01 04:40:08 joehw Exp $ */ final class XMLSchemaValidatorComponentManager extends ParserConfigurationSettings implements XMLComponentManager { // feature identifiers /** Feature identifier: schema validation. */ private static final String SCHEMA_VALIDATION = Constants.XERCES_FEATURE_PREFIX + Constants.SCHEMA_VALIDATION_FEATURE; /** Feature identifier: validation. */ private static final String VALIDATION = Constants.SAX_FEATURE_PREFIX + Constants.VALIDATION_FEATURE; /** Feature identifier: send element default value via characters() */ private static final String SCHEMA_ELEMENT_DEFAULT = Constants.XERCES_FEATURE_PREFIX + Constants.SCHEMA_ELEMENT_DEFAULT; /** Feature identifier: use grammar pool only. */ private static final String USE_GRAMMAR_POOL_ONLY = Constants.XERCES_FEATURE_PREFIX + Constants.USE_GRAMMAR_POOL_ONLY_FEATURE; // property identifiers /** Property identifier: entity manager. */ private static final String ENTITY_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.ENTITY_MANAGER_PROPERTY; /** Property identifier: entity resolver. */ private static final String ENTITY_RESOLVER = Constants.XERCES_PROPERTY_PREFIX + Constants.ENTITY_RESOLVER_PROPERTY; /** Property identifier: error handler. */ private static final String ERROR_HANDLER = Constants.XERCES_PROPERTY_PREFIX + Constants.ERROR_HANDLER_PROPERTY; /** Property identifier: error reporter. 
*/ private static final String ERROR_REPORTER = Constants.XERCES_PROPERTY_PREFIX + Constants.ERROR_REPORTER_PROPERTY; /** Property identifier: namespace context. */ private static final String NAMESPACE_CONTEXT = Constants.XERCES_PROPERTY_PREFIX + Constants.NAMESPACE_CONTEXT_PROPERTY; /** Property identifier: XML Schema validator. */ private static final String SCHEMA_VALIDATOR = Constants.XERCES_PROPERTY_PREFIX + Constants.SCHEMA_VALIDATOR_PROPERTY; /** Property identifier: security manager. */ private static final String SECURITY_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.SECURITY_MANAGER_PROPERTY; /** Property identifier: security property manager. */ private static final String XML_SECURITY_PROPERTY_MANAGER = Constants.XML_SECURITY_PROPERTY_MANAGER; /** Property identifier: symbol table. */ private static final String SYMBOL_TABLE = Constants.XERCES_PROPERTY_PREFIX + Constants.SYMBOL_TABLE_PROPERTY; /** Property identifier: validation manager. */ private static final String VALIDATION_MANAGER = Constants.XERCES_PROPERTY_PREFIX + Constants.VALIDATION_MANAGER_PROPERTY; /** Property identifier: grammar pool. */ private static final String XMLGRAMMAR_POOL = Constants.XERCES_PROPERTY_PREFIX + Constants.XMLGRAMMAR_POOL_PROPERTY; /** Property identifier: locale. */ private static final String LOCALE = Constants.XERCES_PROPERTY_PREFIX + Constants.LOCALE_PROPERTY; // // Data // /** * <p>State of secure mode.</p> */ private boolean _isSecureMode = false; /** * fConfigUpdated is set to true if there has been any change to the configuration settings, * i.e a feature or a property was changed. */ private boolean fConfigUpdated = true; /** * Tracks whether the validator should use components from * the grammar pool to the exclusion of all others. */ private boolean fUseGrammarPoolOnly; /** Lookup map for components required for validation. **/ private final HashMap fComponents = new HashMap(); // // Components // /** Entity manager. 
*/ private XMLEntityManager fEntityManager; /** Error reporter. */ private XMLErrorReporter fErrorReporter; /** Namespace context. */ private NamespaceContext fNamespaceContext; /** XML Schema validator. */ private XMLSchemaValidator fSchemaValidator; /** Validation manager. */ private ValidationManager fValidationManager; // // Configuration // /** Stores initial feature values for validator reset. */ private final HashMap fInitFeatures = new HashMap(); /** Stores initial property values for validator reset. */ private final HashMap fInitProperties = new HashMap(); /** Stores the initial security manager. */ private XMLSecurityManager fInitSecurityManager; /** Stores the initial security property manager. */ private final XMLSecurityPropertyManager fSecurityPropertyMgr; // // User Objects // /** Application's ErrorHandler. **/ private ErrorHandler fErrorHandler = null; /** Application's LSResourceResolver. */ private LSResourceResolver fResourceResolver = null; /** Locale chosen by the application. */ private Locale fLocale = null; /** Constructs a component manager suitable for Xerces' schema validator. 
*/ public XMLSchemaValidatorComponentManager(XSGrammarPoolContainer grammarContainer) { // setup components fEntityManager = new XMLEntityManager(); fComponents.put(ENTITY_MANAGER, fEntityManager); fErrorReporter = new XMLErrorReporter(); fComponents.put(ERROR_REPORTER, fErrorReporter); fNamespaceContext = new NamespaceSupport(); fComponents.put(NAMESPACE_CONTEXT, fNamespaceContext); fSchemaValidator = new XMLSchemaValidator(); fComponents.put(SCHEMA_VALIDATOR, fSchemaValidator); fValidationManager = new ValidationManager(); fComponents.put(VALIDATION_MANAGER, fValidationManager); // setup other properties fComponents.put(ENTITY_RESOLVER, null); fComponents.put(ERROR_HANDLER, null); fComponents.put(SYMBOL_TABLE, new SymbolTable()); // setup grammar pool fComponents.put(XMLGRAMMAR_POOL, grammarContainer.getGrammarPool()); fUseGrammarPoolOnly = grammarContainer.isFullyComposed(); // add schema message formatter to error reporter fErrorReporter.putMessageFormatter(XSMessageFormatter.SCHEMA_DOMAIN, new XSMessageFormatter()); // add all recognized features and properties and apply their defaults addRecognizedParamsAndSetDefaults(fEntityManager, grammarContainer); addRecognizedParamsAndSetDefaults(fErrorReporter, grammarContainer); addRecognizedParamsAndSetDefaults(fSchemaValidator, grammarContainer); boolean secureProcessing = grammarContainer.getFeature(XMLConstants.FEATURE_SECURE_PROCESSING); if (System.getSecurityManager() != null) { _isSecureMode = true; secureProcessing = true; } fInitSecurityManager = (XMLSecurityManager) grammarContainer.getProperty(SECURITY_MANAGER); if (fInitSecurityManager != null ) { fInitSecurityManager.setSecureProcessing(secureProcessing); } else { fInitSecurityManager = new XMLSecurityManager(secureProcessing); } setProperty(SECURITY_MANAGER, fInitSecurityManager); //pass on properties set on SchemaFactory fSecurityPropertyMgr = (XMLSecurityPropertyManager) grammarContainer.getProperty(Constants.XML_SECURITY_PROPERTY_MANAGER); 
setProperty(XML_SECURITY_PROPERTY_MANAGER, fSecurityPropertyMgr); } /** * Returns the state of a feature. * * @param featureId The feature identifier. * @return true if the feature is supported * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. */ public FeatureState getFeatureState(String featureId) throws XMLConfigurationException { if (PARSER_SETTINGS.equals(featureId)) { return FeatureState.is(fConfigUpdated); } else if (VALIDATION.equals(featureId) || SCHEMA_VALIDATION.equals(featureId)) { return FeatureState.is(true); } else if (USE_GRAMMAR_POOL_ONLY.equals(featureId)) { return FeatureState.is(fUseGrammarPoolOnly); } else if (XMLConstants.FEATURE_SECURE_PROCESSING.equals(featureId)) { return FeatureState.is(fInitSecurityManager.isSecureProcessing()); } else if (SCHEMA_ELEMENT_DEFAULT.equals(featureId)) { return FeatureState.is(true); //pre-condition: VALIDATION and SCHEMA_VALIDATION are always true } return super.getFeatureState(featureId); } /** * Set the state of a feature. * * @param featureId The unique identifier (URI) of the feature. * @param state The requested state of the feature (true or false). * * @exception XMLConfigurationException If the requested feature is not known. 
*/ public void setFeature(String featureId, boolean value) throws XMLConfigurationException { if (PARSER_SETTINGS.equals(featureId)) { throw new XMLConfigurationException(Status.NOT_SUPPORTED, featureId); } else if (value == false && (VALIDATION.equals(featureId) || SCHEMA_VALIDATION.equals(featureId))) { throw new XMLConfigurationException(Status.NOT_SUPPORTED, featureId); } else if (USE_GRAMMAR_POOL_ONLY.equals(featureId) && value != fUseGrammarPoolOnly) { throw new XMLConfigurationException(Status.NOT_SUPPORTED, featureId); } if (XMLConstants.FEATURE_SECURE_PROCESSING.equals(featureId)) { if (_isSecureMode && !value) { throw new XMLConfigurationException(Status.NOT_ALLOWED, XMLConstants.FEATURE_SECURE_PROCESSING); } fInitSecurityManager.setSecureProcessing(value); setProperty(SECURITY_MANAGER, fInitSecurityManager); if (value && Constants.IS_JDK8_OR_ABOVE) { fSecurityPropertyMgr.setValue(XMLSecurityPropertyManager.Property.ACCESS_EXTERNAL_DTD, XMLSecurityPropertyManager.State.FSP, Constants.EXTERNAL_ACCESS_DEFAULT_FSP); fSecurityPropertyMgr.setValue(XMLSecurityPropertyManager.Property.ACCESS_EXTERNAL_SCHEMA, XMLSecurityPropertyManager.State.FSP, Constants.EXTERNAL_ACCESS_DEFAULT_FSP); setProperty(XML_SECURITY_PROPERTY_MANAGER, fSecurityPropertyMgr); } return; } fConfigUpdated = true; fEntityManager.setFeature(featureId, value); fErrorReporter.setFeature(featureId, value); fSchemaValidator.setFeature(featureId, value); if (!fInitFeatures.containsKey(featureId)) { boolean current = super.getFeature(featureId); fInitFeatures.put(featureId, current ? Boolean.TRUE : Boolean.FALSE); } super.setFeature(featureId, value); } /** * Returns the value of a property. * * @param propertyId The property identifier. * @return the value of the property * * @throws XMLConfigurationException Thrown for configuration error. * In general, components should * only throw this exception if * it is <strong>really</strong> * a critical error. 
*/ public PropertyState getPropertyState(String propertyId) throws XMLConfigurationException { if (LOCALE.equals(propertyId)) { return PropertyState.is(getLocale()); } final Object component = fComponents.get(propertyId); if (component != null) { return PropertyState.is(component); } else if (fComponents.containsKey(propertyId)) { return PropertyState.is(null); } return super.getPropertyState(propertyId); } /** * Sets the state of a property. * * @param propertyId The unique identifier (URI) of the property. * @param value The requested state of the property. * * @exception XMLConfigurationException If the requested property is not known. */ public void setProperty(String propertyId, Object value) throws XMLConfigurationException { if ( ENTITY_MANAGER.equals(propertyId) || ERROR_REPORTER.equals(propertyId) || NAMESPACE_CONTEXT.equals(propertyId) || SCHEMA_VALIDATOR.equals(propertyId) || SYMBOL_TABLE.equals(propertyId) || VALIDATION_MANAGER.equals(propertyId) || XMLGRAMMAR_POOL.equals(propertyId)) { throw new XMLConfigurationException(Status.NOT_SUPPORTED, propertyId); } fConfigUpdated = true; fEntityManager.setProperty(propertyId, value); fErrorReporter.setProperty(propertyId, value); fSchemaValidator.setProperty(propertyId, value); if (ENTITY_RESOLVER.equals(propertyId) || ERROR_HANDLER.equals(propertyId) || SECURITY_MANAGER.equals(propertyId)) { fComponents.put(propertyId, value); return; } else if (LOCALE.equals(propertyId)) { setLocale((Locale) value); fComponents.put(propertyId, value); return; } //check if the property is managed by security manager if (fInitSecurityManager == null || !fInitSecurityManager.setLimit(propertyId, XMLSecurityManager.State.APIPROPERTY, value)) { //check if the property is managed by security property manager if (fSecurityPropertyMgr == null || !fSecurityPropertyMgr.setValue(propertyId, XMLSecurityPropertyManager.State.APIPROPERTY, value)) { //fall back to the existing property manager if (!fInitProperties.containsKey(propertyId)) 
{ fInitProperties.put(propertyId, super.getProperty(propertyId)); } super.setProperty(propertyId, value); } } } /** * Adds all of the component's recognized features and properties * to the list of default recognized features and properties, and * sets default values on the configuration for features and * properties which were previously absent from the configuration. * * @param component The component whose recognized features * and properties will be added to the configuration */ public void addRecognizedParamsAndSetDefaults(XMLComponent component, XSGrammarPoolContainer grammarContainer) { // register component's recognized features final String[] recognizedFeatures = component.getRecognizedFeatures(); addRecognizedFeatures(recognizedFeatures); // register component's recognized properties final String[] recognizedProperties = component.getRecognizedProperties(); addRecognizedProperties(recognizedProperties); // set default values setFeatureDefaults(component, recognizedFeatures, grammarContainer); setPropertyDefaults(component, recognizedProperties); } /** Calls reset on each of the components owned by this component manager. **/ public void reset() throws XNIException { fNamespaceContext.reset(); fValidationManager.reset(); fEntityManager.reset(this); fErrorReporter.reset(this); fSchemaValidator.reset(this); // Mark configuration as fixed. fConfigUpdated = false; } void setErrorHandler(ErrorHandler errorHandler) { fErrorHandler = errorHandler; setProperty(ERROR_HANDLER, (errorHandler != null) ? 
new ErrorHandlerWrapper(errorHandler) : new ErrorHandlerWrapper(DraconianErrorHandler.getInstance())); } ErrorHandler getErrorHandler() { return fErrorHandler; } void setResourceResolver(LSResourceResolver resourceResolver) { fResourceResolver = resourceResolver; setProperty(ENTITY_RESOLVER, new DOMEntityResolverWrapper(resourceResolver)); } LSResourceResolver getResourceResolver() { return fResourceResolver; } void setLocale(Locale locale) { fLocale = locale; fErrorReporter.setLocale(locale); } Locale getLocale() { return fLocale; } /** Cleans out configuration, restoring it to its initial state. */ void restoreInitialState() { fConfigUpdated = true; // Remove error resolver and error handler fComponents.put(ENTITY_RESOLVER, null); fComponents.put(ERROR_HANDLER, null); // Set the Locale back to null. setLocale(null); fComponents.put(LOCALE, null); // Restore initial security manager fComponents.put(SECURITY_MANAGER, fInitSecurityManager); // Set the Locale back to null. setLocale(null); fComponents.put(LOCALE, null); // Reset feature and property values to their initial values if (!fInitFeatures.isEmpty()) { Iterator iter = fInitFeatures.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = (Map.Entry) iter.next(); String name = (String) entry.getKey(); boolean value = ((Boolean) entry.getValue()).booleanValue(); super.setFeature(name, value); } fInitFeatures.clear(); } if (!fInitProperties.isEmpty()) { Iterator iter = fInitProperties.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = (Map.Entry) iter.next(); String name = (String) entry.getKey(); Object value = entry.getValue(); super.setProperty(name, value); } fInitProperties.clear(); } } /** Sets feature defaults for the given component on this configuration. 
*/ private void setFeatureDefaults(final XMLComponent component, final String [] recognizedFeatures, XSGrammarPoolContainer grammarContainer) { if (recognizedFeatures != null) { for (int i = 0; i < recognizedFeatures.length; ++i) { String featureId = recognizedFeatures[i]; Boolean state = grammarContainer.getFeature(featureId); if (state == null) { state = component.getFeatureDefault(featureId); } if (state != null) { // Do not overwrite values already set on the configuration. if (!fFeatures.containsKey(featureId)) { fFeatures.put(featureId, state); // For newly added components who recognize this feature // but did not offer a default value, we need to make // sure these components will get an opportunity to read // the value before parsing begins. fConfigUpdated = true; } } } } } /** Sets property defaults for the given component on this configuration. */ private void setPropertyDefaults(final XMLComponent component, final String [] recognizedProperties) { if (recognizedProperties != null) { for (int i = 0; i < recognizedProperties.length; ++i) { String propertyId = recognizedProperties[i]; Object value = component.getPropertyDefault(propertyId); if (value != null) { // Do not overwrite values already set on the configuration. if (!fProperties.containsKey(propertyId)) { fProperties.put(propertyId, value); // For newly added components who recognize this property // but did not offer a default value, we need to make // sure these components will get an opportunity to read // the value before parsing begins. fConfigUpdated = true; } } } } } } // XMLSchemaValidatorComponentManager
mit
md-5/jdk10
test/jdk/java/lang/annotation/AnnotationsInheritanceOrderRedefinitionTest.java
7830
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8011940
 * @summary Test inheritance, order and class redefinition behaviour of RUNTIME
 *          class annotations
 * @author plevart
 * @modules java.base/java.lang:open
 *          java.base/sun.reflect.annotation
 */

import sun.reflect.annotation.AnnotationParser;

import java.lang.annotation.Annotation;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.StringJoiner;

public class AnnotationsInheritanceOrderRedefinitionTest {

    // Three @Inherited runtime annotations used to build the A <- B <- C
    // hierarchy below; each carries a String value identifying where it was
    // declared.
    @Retention(RetentionPolicy.RUNTIME)
    @Inherited
    @interface Ann1 {
        String value();
    }

    @Retention(RetentionPolicy.RUNTIME)
    @Inherited
    @interface Ann2 {
        String value();
    }

    @Retention(RetentionPolicy.RUNTIME)
    @Inherited
    @interface Ann3 {
        String value();
    }

    // C overrides Ann1 from A and Ann3 from B; Ann2 is only inherited from A.
    @Ann1("A")
    @Ann2("A")
    static class A {}

    @Ann3("B")
    static class B extends A {}

    @Ann1("C")
    @Ann3("C")
    static class C extends B {}

    public static void main(String[] args) {
        StringBuilder msgs = new StringBuilder();
        boolean ok = true;

        // Phase 1: verify declared vs. inherited annotation sets and their order.
        // getDeclaredAnnotations (declaredOnly == true) returns only annotations
        // present directly on the class; getAnnotations also includes @Inherited
        // annotations from superclasses that are not overridden.
        ok &= annotationsEqual(msgs, A.class, true,
            ann(Ann1.class, "A"), ann(Ann2.class, "A"));
        ok &= annotationsEqual(msgs, A.class, false,
            ann(Ann1.class, "A"), ann(Ann2.class, "A"));
        ok &= annotationsEqual(msgs, B.class, true,
            ann(Ann3.class, "B"));
        ok &= annotationsEqual(msgs, B.class, false,
            ann(Ann1.class, "A"), ann(Ann2.class, "A"), ann(Ann3.class, "B"));
        ok &= annotationsEqual(msgs, C.class, true,
            ann(Ann1.class, "C"), ann(Ann3.class, "C"));
        ok &= annotationsEqual(msgs, C.class, false,
            ann(Ann1.class, "C"), ann(Ann2.class, "A"), ann(Ann3.class, "C"));

        // Phase 2: capture the current (cached) annotation instances, then bump
        // each class's classRedefinedCount to simulate class redefinition.
        // Afterwards the annotations must still be equal but must be *new*
        // instances, proving the cached annotations were re-parsed.
        Annotation[] declaredAnnotatiosA = A.class.getDeclaredAnnotations();
        Annotation[] annotationsA = A.class.getAnnotations();
        Annotation[] declaredAnnotatiosB = B.class.getDeclaredAnnotations();
        Annotation[] annotationsB = B.class.getAnnotations();
        Annotation[] declaredAnnotatiosC = C.class.getDeclaredAnnotations();
        Annotation[] annotationsC = C.class.getAnnotations();

        incrementClassRedefinedCount(A.class);
        incrementClassRedefinedCount(B.class);
        incrementClassRedefinedCount(C.class);

        ok &= annotationsEqualButNotSame(msgs, A.class, true, declaredAnnotatiosA);
        ok &= annotationsEqualButNotSame(msgs, A.class, false, annotationsA);
        ok &= annotationsEqualButNotSame(msgs, B.class, true, declaredAnnotatiosB);
        ok &= annotationsEqualButNotSame(msgs, B.class, false, annotationsB);
        ok &= annotationsEqualButNotSame(msgs, C.class, true, declaredAnnotatiosC);
        ok &= annotationsEqualButNotSame(msgs, C.class, false, annotationsC);

        if (!ok) {
            throw new RuntimeException("test failure\n" + msgs);
        }
    }

    // utility methods

    // Checks that the class's annotations still compare equal to oldAnns AND
    // that none of them is the identical (==) instance — i.e. each was
    // re-created after the simulated redefinition. Appends a diagnostic to
    // msgs and returns false on failure.
    private static boolean annotationsEqualButNotSame(StringBuilder msgs,
            Class<?> declaringClass, boolean declaredOnly, Annotation[] oldAnns) {
        if (!annotationsEqual(msgs, declaringClass, declaredOnly, oldAnns)) {
            return false;
        }
        Annotation[] anns = declaredOnly
                            ? declaringClass.getDeclaredAnnotations()
                            : declaringClass.getAnnotations();
        List<Annotation> sameAnns = new ArrayList<>();
        for (int i = 0; i < anns.length; i++) {
            if (anns[i] == oldAnns[i]) {
                sameAnns.add(anns[i]);
            }
        }
        if (!sameAnns.isEmpty()) {
            msgs.append(declaredOnly ? "declared " : "").append("annotations for ")
                .append(declaringClass.getSimpleName())
                .append(" not re-parsed after class redefinition: ")
                .append(toSimpleString(sameAnns)).append("\n");
            return false;
        } else {
            return true;
        }
    }

    // Checks that the class's (declared) annotations are exactly expectedAnns,
    // in order. Appends a diagnostic to msgs and returns false on mismatch.
    private static boolean annotationsEqual(StringBuilder msgs,
            Class<?> declaringClass, boolean declaredOnly, Annotation... expectedAnns) {
        Annotation[] anns = declaredOnly
                            ? declaringClass.getDeclaredAnnotations()
                            : declaringClass.getAnnotations();
        if (!Arrays.equals(anns, expectedAnns)) {
            msgs.append(declaredOnly ? "declared " : "").append("annotations for ")
                .append(declaringClass.getSimpleName()).append(" are: ")
                .append(toSimpleString(anns)).append(", expected: ")
                .append(toSimpleString(expectedAnns)).append("\n");
            return false;
        } else {
            return true;
        }
    }

    // Builds a synthetic annotation instance (via the internal
    // AnnotationParser) with the given type and single "value" element, used
    // as the expected value in comparisons above.
    private static Annotation ann(Class<? extends Annotation> annotationType,
                                  Object value) {
        return AnnotationParser.annotationForMap(annotationType,
            Collections.singletonMap("value", value));
    }

    private static String toSimpleString(List<Annotation> anns) {
        return toSimpleString(anns.toArray(new Annotation[anns.size()]));
    }

    private static String toSimpleString(Annotation[] anns) {
        StringJoiner joiner = new StringJoiner(", ");
        for (Annotation ann : anns) {
            joiner.add(toSimpleString(ann));
        }
        return joiner.toString();
    }

    // Renders an annotation as "@Type(value)" by reflectively invoking its
    // value() element; assumes every annotation used here has a value().
    private static String toSimpleString(Annotation ann) {
        Class<? extends Annotation> annotationType = ann.annotationType();
        Object value;
        try {
            value = annotationType.getDeclaredMethod("value").invoke(ann);
        } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
        return "@" + annotationType.getSimpleName() + "(" + value + ")";
    }

    // Reflective access to java.lang.Class.classRedefinedCount; requires the
    // java.base/java.lang:open module option declared in the @test tags above.
    private static final Field classRedefinedCountField;
    static {
        try {
            classRedefinedCountField = Class.class.getDeclaredField("classRedefinedCount");
            classRedefinedCountField.setAccessible(true);
        } catch (NoSuchFieldException e) {
            throw new Error(e);
        }
    }

    // Simulates a class redefinition: the annotation caches on Class are keyed
    // by classRedefinedCount, so bumping it invalidates the cached annotations.
    private static void incrementClassRedefinedCount(Class<?> clazz) {
        try {
            classRedefinedCountField.set(clazz,
                ((Integer) classRedefinedCountField.get(clazz)) + 1);
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }
}
gpl-2.0
FauxFaux/jdk9-jdk
src/java.base/share/classes/javax/net/ssl/SNIServerName.java
7149
/* * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package javax.net.ssl; import java.util.Arrays; /** * Instances of this class represent a server name in a Server Name * Indication (SNI) extension. * <P> * The SNI extension is a feature that extends the SSL/TLS/DTLS protocols to * indicate what server name the client is attempting to connect to during * handshaking. See section 3, "Server Name Indication", of <A * HREF="http://www.ietf.org/rfc/rfc6066.txt">TLS Extensions (RFC 6066)</A>. * <P> * {@code SNIServerName} objects are immutable. Subclasses should not provide * methods that can change the state of an instance once it has been created. 
* * @see SSLParameters#getServerNames() * @see SSLParameters#setServerNames(List) * * @since 1.8 */ public abstract class SNIServerName { // the type of the server name private final int type; // the encoded value of the server name private final byte[] encoded; // the hex digitals private static final char[] HEXES = "0123456789ABCDEF".toCharArray(); /** * Creates an {@code SNIServerName} using the specified name type and * encoded value. * <P> * Note that the {@code encoded} byte array is cloned to protect against * subsequent modification. * * @param type * the type of the server name * @param encoded * the encoded value of the server name * * @throws IllegalArgumentException if {@code type} is not in the range * of 0 to 255, inclusive. * @throws NullPointerException if {@code encoded} is null */ protected SNIServerName(int type, byte[] encoded) { if (type < 0) { throw new IllegalArgumentException( "Server name type cannot be less than zero"); } else if (type > 255) { throw new IllegalArgumentException( "Server name type cannot be greater than 255"); } this.type = type; if (encoded == null) { throw new NullPointerException( "Server name encoded value cannot be null"); } this.encoded = encoded.clone(); } /** * Returns the name type of this server name. * * @return the name type of this server name */ public final int getType() { return type; } /** * Returns a copy of the encoded server name value of this server name. * * @return a copy of the encoded server name value of this server name */ public final byte[] getEncoded() { return encoded.clone(); } /** * Indicates whether some other object is "equal to" this server name. * * @return true if, and only if, {@code other} is of the same class * of this object, and has the same name type and * encoded value as this server name. 
*/ @Override public boolean equals(Object other) { if (this == other) { return true; } if (this.getClass() != other.getClass()) { return false; } SNIServerName that = (SNIServerName)other; return (this.type == that.type) && Arrays.equals(this.encoded, that.encoded); } /** * Returns a hash code value for this server name. * <P> * The hash code value is generated using the name type and encoded * value of this server name. * * @return a hash code value for this server name. */ @Override public int hashCode() { int result = 17; // 17/31: prime number to decrease collisions result = 31 * result + type; result = 31 * result + Arrays.hashCode(encoded); return result; } /** * Returns a string representation of this server name, including the server * name type and the encoded server name value in this * {@code SNIServerName} object. * <P> * The exact details of the representation are unspecified and subject * to change, but the following may be regarded as typical: * <pre> * "type={@literal <name type>}, value={@literal <name value>}" * </pre> * <P> * In this class, the format of "{@literal <name type>}" is * "[LITERAL] (INTEGER)", where the optional "LITERAL" is the literal * name, and INTEGER is the integer value of the name type. The format * of "{@literal <name value>}" is "XX:...:XX", where "XX" is the * hexadecimal digit representation of a byte value. For example, a * returned value of an pseudo server name may look like: * <pre> * "type=(31), value=77:77:77:2E:65:78:61:6D:70:6C:65:2E:63:6E" * </pre> * or * <pre> * "type=host_name (0), value=77:77:77:2E:65:78:61:6D:70:6C:65:2E:63:6E" * </pre> * * <P> * Please NOTE that the exact details of the representation are unspecified * and subject to change, and subclasses may override the method with * their own formats. 
* * @return a string representation of this server name */ @Override public String toString() { if (type == StandardConstants.SNI_HOST_NAME) { return "type=host_name (0), value=" + toHexString(encoded); } else { return "type=(" + type + "), value=" + toHexString(encoded); } } // convert byte array to hex string private static String toHexString(byte[] bytes) { if (bytes.length == 0) { return "(empty)"; } StringBuilder sb = new StringBuilder(bytes.length * 3 - 1); boolean isInitial = true; for (byte b : bytes) { if (isInitial) { isInitial = false; } else { sb.append(':'); } int k = b & 0xFF; sb.append(HEXES[k >>> 4]); sb.append(HEXES[k & 0xF]); } return sb.toString(); } }
gpl-2.0
kaen/Terasology
engine/src/main/java/org/terasology/logic/characters/CharacterImpulseEvent.java
978
/*
 * Copyright 2016 MovingBlocks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.terasology.logic.characters;

import org.terasology.entitySystem.event.Event;
import org.terasology.math.geom.Vector3f;

/**
 * Event carrying an impulse vector to be applied to a character entity.
 * <p>
 * The vector combines direction and magnitude of the impulse. The field is
 * {@code final} so the event cannot be retargeted after construction; note
 * that the {@link Vector3f} itself is stored and returned without a
 * defensive copy, so callers should not mutate it after sending the event.
 */
public class CharacterImpulseEvent implements Event {

    /** Direction and magnitude of the impulse; fixed at construction time. */
    final Vector3f direction;

    /**
     * @param direction the impulse to apply (direction and magnitude combined)
     */
    public CharacterImpulseEvent(Vector3f direction) {
        this.direction = direction;
    }

    /**
     * @return the impulse vector supplied at construction
     */
    public Vector3f getDirection() {
        return direction;
    }
}
apache-2.0
gpolitis/jitsi
src/net/java/sip/communicator/plugin/defaultresourcepack/DefaultSettingsPackImpl.java
3815
/*
 * Jitsi, the OpenSource Java VoIP and Instant Messaging client.
 *
 * Copyright @ 2015 Atlassian Pty Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.java.sip.communicator.plugin.defaultresourcepack;

import java.util.*;

import net.java.sip.communicator.service.resources.*;

/**
 * The default settings resource pack.
 *
 * @author Damian Minkov
 * @author Yana Stamcheva
 */
public class DefaultSettingsPackImpl
    implements SettingsPack
{
    /** Base name of the "main" settings properties bundle. */
    private static final String DEFAULT_RESOURCE_PATH
        = "resources.config.defaults";

    /**
     * Returns a <tt>Map</tt>, containing all [key, value] pairs for this
     * resource pack.
     *
     * @return a <tt>Map</tt>, containing all [key, value] pairs for this
     * resource pack.
     */
    public Map<String, String> getResources()
    {
        ResourceBundle resourceBundle
            = ResourceBundle.getBundle(DEFAULT_RESOURCE_PATH);

        // TreeMap keeps the settings sorted by key.
        Map<String, String> resources = new TreeMap<String, String>();

        this.initResources(resourceBundle, resources);

        this.initPluginResources(resources);

        return resources;
    }

    /**
     * Returns the name of this resource pack.
     *
     * @return the name of this resource pack.
     */
    public String getName()
    {
        return "Default Settings Resources";
    }

    /**
     * Returns the description of this resource pack.
     *
     * @return the description of this resource pack.
     */
    public String getDescription()
    {
        return "Provide Jitsi default settings resource pack.";
    }

    /**
     * Fills the given resource map with all (key,value) pairs obtained from
     * the given <tt>ResourceBundle</tt>.
     *
     * @param resourceBundle The initial <tt>ResourceBundle</tt>, corresponding
     * to the "main" properties file.
     * @param resources A <tt>Map</tt> that would store the data.
     */
    private void initResources(
            ResourceBundle resourceBundle,
            Map<String, String> resources)
    {
        Enumeration<String> keys = resourceBundle.getKeys();

        while (keys.hasMoreElements())
        {
            String key = keys.nextElement();
            String value = resourceBundle.getString(key);

            resources.put(key, value);
        }
    }

    /**
     * Finds all plugin settings resources, matching the
     * "defaults-*.properties" pattern and adds them to this resource pack.
     *
     * @param resources A <tt>Map</tt> that would store the data.
     */
    private void initPluginResources(Map<String, String> resources)
    {
        Iterator<String> pluginProperties
            = DefaultResourcePackActivator.findResourcePaths(
                    "resources/config",
                    "defaults-*.properties");

        while (pluginProperties.hasNext())
        {
            String resourceBundleName = pluginProperties.next();

            // Strip the ".properties" suffix to obtain the bundle base name.
            ResourceBundle resourceBundle
                = ResourceBundle.getBundle(
                        resourceBundleName.substring(
                            0,
                            resourceBundleName.indexOf(".properties")));

            initResources(resourceBundle, resources);
        }
    }
}
apache-2.0
GlenRSmith/elasticsearch
x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
66595
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ package org.elasticsearch.xpack.ccr; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.seqno.RetentionLeaseUtils; import 
org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; import org.elasticsearch.xpack.ccr.repository.CcrRepository; import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static 
org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @Override public List<Setting<?>> getSettings() { return Collections.singletonList(CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING); } } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(RetentionLeaseRenewIntervalSettingPlugin.class)) .collect(Collectors.toList()); } @Override protected Settings followerClusterSettings() { return Settings.builder() .put(super.followerClusterSettings()) .put(CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200)) .build(); } private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); private RestoreSnapshotRequest setUpRestoreSnapshotRequest( final String leaderIndex, final int numberOfShards, final int numberOfReplicas, final String followerIndex, final int numberOfDocuments ) throws IOException { final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); final String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize)); assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; final Map<String, String> additionalSettings = new HashMap<>(); additionalSettings.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep()); final String leaderIndexSettings = 
getIndexSettings(numberOfShards, numberOfReplicas, additionalSettings); assertAcked( leaderClient().admin() .indices() .prepareCreate(leaderIndex) .setMasterNodeTimeout(TimeValue.MAX_VALUE) .setSource(leaderIndexSettings, XContentType.JSON) ); ensureLeaderGreen(leaderIndex); logger.info("indexing [{}] docs", numberOfDocuments); for (int i = 0; i < numberOfDocuments; i++) { final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); leaderClient().prepareIndex(leaderIndex).setId(Integer.toString(i)).setSource(source, XContentType.JSON).get(); if (rarely()) { leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); } } leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); final Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); return new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indexSettings(settingsBuilder) .indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) .masterNodeTimeout(TimeValue.MAX_VALUE); } public void testRetentionLeaseIsTakenAtTheStartOfRecovery() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); final int numberOfReplicas = between(0, 1); final String followerIndex = "follower"; final int numberOfDocuments = scaledRandomIntBetween(1, 8192); final RestoreSnapshotRequest restoreRequest = setUpRestoreSnapshotRequest( leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments ); final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); final PlainActionFuture<RestoreInfo> future = 
PlainActionFuture.newFuture(); restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); // ensure that a retention lease has been put in place on each shard assertBusy(() -> { final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); } }); final RestoreInfo restoreInfo = future.actionGet(); assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); assertEquals(0, restoreInfo.failedShards()); for (int i = 0; i < numberOfDocuments; ++i) { assertExpectedDocument(followerIndex, i); } } public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); final int numberOfReplicas = between(0, 1); final String followerIndex = "follower"; final int numberOfDocuments = scaledRandomIntBetween(1, 8192); final RestoreSnapshotRequest restoreRequest = setUpRestoreSnapshotRequest( leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments ); final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); final ClusterService 
clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); final CountDownLatch latch = new CountDownLatch(1); // block the recovery from completing; this ensures the background sync is still running final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (ClearCcrRestoreSessionAction.NAME.equals(action) || TransportActionProxy.getProxyAction(ClearCcrRestoreSessionAction.NAME).equals(action)) { try { latch.await(); } catch (final InterruptedException e) { fail(e.toString()); } } connection.sendRequest(requestId, action, request, options); }); } final PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture(); restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); try { assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); latch.countDown(); } finally { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.clearAllRules(); } } final RestoreInfo restoreInfo = future.actionGet(); assertEquals(restoreInfo.totalShards(), restoreInfo. 
successfulShards()); assertEquals(0, restoreInfo.failedShards()); for (int i = 0; i < numberOfDocuments; i++) { assertExpectedDocument(followerIndex, i); } } public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); final int numberOfReplicas = between(0, 1); final String followerIndex = "follower"; final int numberOfDocuments = scaledRandomIntBetween(1, 8192); final RestoreSnapshotRequest restoreRequest = setUpRestoreSnapshotRequest( leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments ); final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); final PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture(); restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); final RestoreInfo restoreInfo = future.actionGet(); final long start = System.nanoTime(); /* * We want to ensure that the retention leases have been synced to all shard copies, as otherwise they might sync between the two * times that we sample the retention leases, which would cause our check to fail. 
*/ final TimeValue syncIntervalSetting = IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.get( leaderClient().admin().indices().prepareGetSettings(leaderIndex).get().getIndexToSettings().get(leaderIndex) ); final long syncEnd = System.nanoTime(); Thread.sleep(Math.max(0, randomIntBetween(2, 4) * syncIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(syncEnd - start))); final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(leaderIndex) .get(); final String leaderUUID = leaderIndexClusterState.getState().metadata().index(leaderIndex).getIndexUUID(); /* * We want to ensure that the background renewal is cancelled at the end of recovery. To do this, we will sleep a small multiple * of the renew interval. If the renews are not cancelled, we expect that a renewal would have been sent while we were sleeping. * After we wake up, it should be the case that the retention leases are the same (same timestamp) as that indicates that they were * not renewed while we were sleeping. 
*/ assertBusy(() -> { // sample the leases after recovery final List<Map<String, RetentionLease>> retentionLeases = new ArrayList<>(); assertBusy(() -> { retentionLeases.clear(); final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils .toMapExcludingPeerRecoveryRetentionLeases(shardsStats.get(i).getRetentionLeaseStats().retentionLeases()); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); final String expectedRetentionLeaseId = retentionLeaseId( getFollowerCluster().getClusterName(), new Index(followerIndex, followerUUID), getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID) ); assertThat(retentionLease.id(), equalTo(expectedRetentionLeaseId)); retentionLeases.add(currentRetentionLeases); } }); // sleep a small multiple of the renew interval final TimeValue renewIntervalSetting = CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(followerClusterSettings()); final long renewEnd = System.nanoTime(); Thread.sleep( Math.max(0, randomIntBetween(2, 4) * renewIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(renewEnd - start)) ); // now ensure that the 
retention leases are the same final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { if (shardsStats.get(i).getShardRouting().primary() == false) { continue; } assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); // we assert that retention leases are being renewed by an increase in the timestamp assertThat(retentionLease.timestamp(), equalTo(retentionLeases.get(i).values().iterator().next().timestamp())); } }); assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); assertEquals(0, restoreInfo.failedShards()); for (int i = 0; i < numberOfDocuments; ++i) { assertExpectedDocument(followerIndex, i); } } public void testUnfollowRemovesRetentionLeases() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final String leaderIndexSettings = 
getIndexSettings(numberOfShards, 0); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); final String retentionLeaseId = getRetentionLeaseId(followerIndex, leaderIndex); final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); final List<ShardStats> shardsStats = getShardsStats(stats); for (final ShardStats shardStats : shardsStats) { final Map<String, RetentionLease> retentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardStats.getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardStats), retentionLeases.values(), hasSize(1)); assertThat(retentionLeases.values().iterator().next().id(), equalTo(retentionLeaseId)); } // we will sometimes fake that some of the retention leases are already removed on the leader shard final Set<Integer> shardIds = new HashSet<>( randomSubsetOf(randomIntBetween(0, numberOfShards), IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet())) ); final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) || TransportActionProxy.getProxyAction(RetentionLeaseActions.Remove.ACTION_NAME).equals(action)) { final 
RetentionLeaseActions.RemoveRequest removeRequest = (RetentionLeaseActions.RemoveRequest) request; if (shardIds.contains(removeRequest.getShardId().id())) { final String primaryShardNodeId = getLeaderCluster().clusterService() .state() .routingTable() .index(leaderIndex) .shard(removeRequest.getShardId().id()) .primaryShard() .currentNodeId(); final String primaryShardNodeName = getLeaderCluster().clusterService() .state() .nodes() .get(primaryShardNodeId) .getName(); final IndexShard primary = getLeaderCluster().getInstance(IndicesService.class, primaryShardNodeName) .getShardOrNull(removeRequest.getShardId()); final CountDownLatch latch = new CountDownLatch(1); primary.removeRetentionLease( retentionLeaseId, ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())) ); try { latch.await(); } catch (final InterruptedException e) { Thread.currentThread().interrupt(); fail(e.toString()); } } } connection.sendRequest(requestId, action, request, options); }); } pauseFollow(followerIndex); assertAcked(followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet()); assertAcked(followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet()); final IndicesStatsResponse afterUnfollowStats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); final List<ShardStats> afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { assertThat( Strings.toString(shardStats), RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(shardStats.getRetentionLeaseStats().retentionLeases()) .values(), empty() ); } } finally { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); 
senderTransportService.clearAllRules(); } } } public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final String leaderIndexSettings = getIndexSettings(numberOfShards, 0); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); pauseFollow(followerIndex); followerClient().admin().indices().close(new CloseIndexRequest(followerIndex).masterNodeTimeout(TimeValue.MAX_VALUE)).actionGet(); // we will disrupt requests to remove retention leases for these random shards final Set<Integer> shardIds = new HashSet<>( randomSubsetOf(randomIntBetween(1, numberOfShards), IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet())) ); final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) || TransportActionProxy.getProxyAction(RetentionLeaseActions.Remove.ACTION_NAME).equals(action)) { final RetentionLeaseActions.RemoveRequest removeRequest = (RetentionLeaseActions.RemoveRequest) request; if (shardIds.contains(removeRequest.getShardId().id())) { throw randomBoolean() ? 
new ConnectTransportException(connection.getNode(), "connection failed") : new IndexShardClosedException(removeRequest.getShardId()); } } connection.sendRequest(requestId, action, request, options); }); } final ElasticsearchException e = expectThrows( ElasticsearchException.class, () -> followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet() ); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(leaderIndex) .get(); final String leaderUUID = leaderIndexClusterState.getState().metadata().index(leaderIndex).getIndexUUID(); assertThat( e.getMetadata("es.failed_to_remove_retention_leases"), contains( retentionLeaseId( getFollowerCluster().getClusterName(), new Index(followerIndex, followerUUID), getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID) ) ) ); } finally { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.clearAllRules(); } } } public void testRetentionLeaseRenewedWhileFollowing() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final int numberOfReplicas = randomIntBetween(0, 1); final Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = 
getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); } public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final int numberOfReplicas = randomIntBetween(0, 1); final Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); final int numberOfDocuments = randomIntBetween(128, 2048); logger.debug("indexing [{}] docs", numberOfDocuments); for (int i = 0; i < numberOfDocuments; i++) { final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); leaderClient().prepareIndex(leaderIndex).setId(Integer.toString(i)).setSource(source, XContentType.JSON).get(); if (rarely()) { leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); } } // wait until the follower global checkpoints have caught up to 
the leader assertIndexFullyReplicatedToFollower(leaderIndex, followerIndex); final List<ShardStats> leaderShardsStats = getShardsStats(leaderClient().admin().indices().prepareStats(leaderIndex).get()); final Map<Integer, Long> leaderGlobalCheckpoints = new HashMap<>(); for (final ShardStats leaderShardStats : leaderShardsStats) { final ShardRouting routing = leaderShardStats.getShardRouting(); if (routing.primary() == false) { continue; } leaderGlobalCheckpoints.put(routing.id(), leaderShardStats.getSeqNoStats().getGlobalCheckpoint()); } // now assert that the retention leases have advanced to the global checkpoints assertBusy(() -> { final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); // we assert that retention leases are being advanced assertThat( retentionLease.retainingSequenceNumber(), equalTo(leaderGlobalCheckpoints.get(shardsStats.get(i).getShardRouting().id()) + 1) ); } }); } public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final int 
numberOfReplicas = randomIntBetween(0, 1); final Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); final long start = System.nanoTime(); pauseFollow(followerIndex); /* * We want to ensure that the retention leases have been synced to all shard copies, as otherwise they might sync between the two * times that we sample the retention leases, which would cause our check to fail. */ final TimeValue syncIntervalSetting = IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.get( leaderClient().admin().indices().prepareGetSettings(leaderIndex).get().getIndexToSettings().get(leaderIndex) ); final long syncEnd = System.nanoTime(); Thread.sleep(Math.max(0, randomIntBetween(2, 4) * syncIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(syncEnd - start))); final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(leaderIndex) .get(); final String leaderUUID = leaderIndexClusterState.getState().metadata().index(leaderIndex).getIndexUUID(); /* * We want to ensure that the background renewal is cancelled after pausing. To do this, we will sleep a small multiple of the renew * interval. If the renews are not cancelled, we expect that a renewal would have been sent while we were sleeping. 
After we wake * up, it should be the case that the retention leases are the same (same timestamp) as that indicates that they were not renewed * while we were sleeping. */ assertBusy(() -> { // sample the leases after pausing final List<Map<String, RetentionLease>> retentionLeases = new ArrayList<>(); assertBusy(() -> { retentionLeases.clear(); final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils .toMapExcludingPeerRecoveryRetentionLeases(shardsStats.get(i).getRetentionLeaseStats().retentionLeases()); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); final String expectedRetentionLeaseId = retentionLeaseId( getFollowerCluster().getClusterName(), new Index(followerIndex, followerUUID), getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID) ); assertThat(retentionLease.id(), equalTo(expectedRetentionLeaseId)); retentionLeases.add(currentRetentionLeases); } }); // sleep a small multiple of the renew interval final TimeValue renewIntervalSetting = CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(followerClusterSettings()); final long renewEnd = 
System.nanoTime(); Thread.sleep( Math.max(0, randomIntBetween(2, 4) * renewIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(renewEnd - start)) ); // now ensure that the retention leases are the same final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { if (shardsStats.get(i).getShardRouting().primary() == false) { continue; } assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); // we assert that retention leases are not being renewed by an unchanged timestamp assertThat(retentionLease.timestamp(), equalTo(retentionLeases.get(i).values().iterator().next().timestamp())); } }); } public void testRetentionLeaseRenewalIsResumedWhenFollowingIsResumed() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final int numberOfReplicas = randomIntBetween(0, 1); final 
Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); pauseFollow(followerIndex); followerClient().execute(ResumeFollowAction.INSTANCE, resumeFollow(followerIndex)).actionGet(); ensureFollowerGreen(true, followerIndex); assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); } public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = 1; final int numberOfReplicas = 1; final Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); final CountDownLatch latch = new CountDownLatch(1); final ClusterStateResponse followerClusterState = 
followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { final RetentionLeaseActions.RenewRequest renewRequest = (RetentionLeaseActions.RenewRequest) request; final String retentionLeaseId = getRetentionLeaseId(followerIndex, leaderIndex); if (retentionLeaseId.equals(renewRequest.getId())) { logger.info("--> intercepting renewal request for retention lease [{}]", retentionLeaseId); senderTransportService.clearAllRules(); final String primaryShardNodeId = getLeaderCluster().clusterService() .state() .routingTable() .index(leaderIndex) .shard(renewRequest.getShardId().id()) .primaryShard() .currentNodeId(); final String primaryShardNodeName = getLeaderCluster().clusterService() .state() .nodes() .get(primaryShardNodeId) .getName(); final IndexShard primary = getLeaderCluster().getInstance(IndicesService.class, primaryShardNodeName) .getShardOrNull(renewRequest.getShardId()); final CountDownLatch innerLatch = new CountDownLatch(1); try { // this forces the background renewal from following to face a retention lease not found exception logger.info("--> removing retention lease [{}] on the leader", retentionLeaseId); primary.removeRetentionLease( retentionLeaseId, ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString())) ); logger.info( "--> waiting for the removed retention lease [{}] to be synced on the leader", retentionLeaseId ); innerLatch.await(); logger.info("--> removed retention lease [{}] on the leader", retentionLeaseId); } catch 
(final Exception e) { throw new AssertionError("failed to remove retention lease [" + retentionLeaseId + "] on the leader"); } finally { latch.countDown(); } } } connection.sendRequest(requestId, action, request, options); }); } latch.await(); assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); } finally { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.clearAllRules(); } } } /** * This test is fairly evil. This test is to ensure that we are protected against a race condition when unfollowing and a background * renewal fires. The action of unfollowing will remove retention leases from the leader. If a background renewal is firing at that * time, it means that we will be met with a retention lease not found exception. That will in turn trigger behavior to attempt to * re-add the retention lease, which means we are left in a situation where we have unfollowed, but the retention lease still remains * on the leader. However, we have a guard against this in the callback after the retention lease not found exception is thrown, which * checks if the shard follow node task is cancelled or completed. * * To test this this behavior is correct, we capture the call to renew the retention lease. Then, we will step in between and execute * an unfollow request. This will remove the retention lease on the leader. At this point, we can unlatch the renew call, which will * now be met with a retention lease not found exception. We will cheat and wait for that response to come back, and then synchronously * trigger the listener which will check to see if the shard follow node task is cancelled or completed, and if not, add the retention * lease back. 
After that listener returns, we can check to see if a retention lease exists on the leader. * * Note, this done mean that listener will fire twice, once in our onResponseReceived hook, and once after our onResponseReceived * callback returns. 🤷‍♀️ * * @throws Exception if an exception occurs in the main test thread */ public void testPeriodicRenewalDoesNotAddRetentionLeaseAfterUnfollow() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = 1; final int numberOfReplicas = 1; final Map<String, String> additionalIndexSettings = new HashMap<>(); additionalIndexSettings.put( IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep() ); final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); final CountDownLatch removeLeaseLatch = new CountDownLatch(1); final CountDownLatch unfollowLatch = new CountDownLatch(1); final CountDownLatch responseLatch = new CountDownLatch(1); final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) || 
TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { final String retentionLeaseId = getRetentionLeaseId(followerIndex, leaderIndex); logger.info("--> blocking renewal request for retention lease [{}] until unfollowed", retentionLeaseId); try { removeLeaseLatch.countDown(); unfollowLatch.await(); senderTransportService.addMessageListener(new TransportMessageListener() { @SuppressWarnings("rawtypes") @Override public void onResponseReceived(final long responseRequestId, final Transport.ResponseContext context) { if (requestId == responseRequestId) { final RetentionLeaseNotFoundException e = new RetentionLeaseNotFoundException(retentionLeaseId); context.handler().handleException(new RemoteTransportException(e.getMessage(), e)); responseLatch.countDown(); senderTransportService.removeMessageListener(this); } } }); } catch (final InterruptedException e) { Thread.currentThread().interrupt(); fail(e.toString()); } } connection.sendRequest(requestId, action, request, options); }); } removeLeaseLatch.await(); pauseFollow(followerIndex); assertAcked(followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet()); assertAcked(followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet()); unfollowLatch.countDown(); responseLatch.await(); final IndicesStatsResponse afterUnfollowStats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); final List<ShardStats> afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { assertNotNull(shardStats.getRetentionLeaseStats()); assertThat( Strings.toString(shardStats), RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(shardStats.getRetentionLeaseStats().retentionLeases()) .values(), empty() ); } } finally { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { 
final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, senderNode.getName() ); senderTransportService.clearAllRules(); } } } public void testForgetFollower() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; final int numberOfShards = randomIntBetween(1, 4); final String leaderIndexSettings = getIndexSettings(numberOfShards, 0); assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow(leaderIndex); final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); ensureFollowerGreen(true, followerIndex); pauseFollow(followerIndex); followerClient().admin().indices().close(new CloseIndexRequest(followerIndex).masterNodeTimeout(TimeValue.MAX_VALUE)).actionGet(); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final BroadcastResponse forgetFollowerResponse = leaderClient().execute( ForgetFollowerAction.INSTANCE, new ForgetFollowerAction.Request( getFollowerCluster().getClusterName(), followerIndex, followerUUID, "leader_cluster", leaderIndex ) ).actionGet(); logger.info(Strings.toString(forgetFollowerResponse)); assertThat(forgetFollowerResponse.getTotalShards(), equalTo(numberOfShards)); assertThat(forgetFollowerResponse.getSuccessfulShards(), equalTo(numberOfShards)); assertThat(forgetFollowerResponse.getFailedShards(), equalTo(0)); assertThat(forgetFollowerResponse.getShardFailures(), emptyArray()); final IndicesStatsResponse afterForgetFollowerStats = leaderClient().admin() .indices() .stats(new 
IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); final List<ShardStats> afterForgetFollowerShardsStats = getShardsStats(afterForgetFollowerStats); for (final ShardStats shardStats : afterForgetFollowerShardsStats) { assertNotNull(shardStats.getRetentionLeaseStats()); assertThat( Strings.toString(shardStats), RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(shardStats.getRetentionLeaseStats().retentionLeases()) .values(), empty() ); } } private void assertRetentionLeaseRenewal( final int numberOfShards, final int numberOfReplicas, final String followerIndex, final String leaderIndex ) throws Exception { // ensure that a retention lease has been put in place on each shard, and grab a copy of them final List<Map<String, RetentionLease>> retentionLeases = new ArrayList<>(); assertBusy(() -> { retentionLeases.clear(); final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); retentionLeases.add(currentRetentionLeases); } }); // now ensure that the retention leases are being renewed assertBusy(() -> { final IndicesStatsResponse stats = leaderClient().admin() .indices() .stats(new 
IndicesStatsRequest().clear().indices(leaderIndex)) .actionGet(); assertNotNull(stats.getShards()); assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); final List<ShardStats> shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); final Map<String, RetentionLease> currentRetentionLeases = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases( shardsStats.get(i).getRetentionLeaseStats().retentionLeases() ); assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.values().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); // we assert that retention leases are being renewed by an increase in the timestamp assertThat(retentionLease.timestamp(), greaterThan(retentionLeases.get(i).values().iterator().next().timestamp())); } }); } /** * Extract the shard stats from an indices stats response, with the stats ordered by shard ID with primaries first. This is to have a * consistent ordering when comparing two responses. 
* * @param stats the indices stats * @return the shard stats in sorted order with (shard ID, primary) as the sort key */ private List<ShardStats> getShardsStats(final IndicesStatsResponse stats) { return Arrays.stream(stats.getShards()).sorted((s, t) -> { if (s.getShardRouting().shardId().id() == t.getShardRouting().shardId().id()) { return -Boolean.compare(s.getShardRouting().primary(), t.getShardRouting().primary()); } else { return Integer.compare(s.getShardRouting().shardId().id(), t.getShardRouting().shardId().id()); } }).collect(Collectors.toList()); } private String getRetentionLeaseId(final String followerIndex, final String leaderIndex) { final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(followerIndex) .get(); final String followerUUID = followerIndexClusterState.getState().metadata().index(followerIndex).getIndexUUID(); final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() .prepareState() .clear() .setMetadata(true) .setIndices(leaderIndex) .get(); final String leaderUUID = leaderIndexClusterState.getState().metadata().index(leaderIndex).getIndexUUID(); return getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID); } private String getRetentionLeaseId(String followerIndex, String followerUUID, String leaderIndex, String leaderUUID) { return retentionLeaseId( getFollowerCluster().getClusterName(), new Index(followerIndex, followerUUID), getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID) ); } private void assertExpectedDocument(final String followerIndex, final int value) { final GetResponse getResponse = followerClient().prepareGet(followerIndex, Integer.toString(value)).get(); assertTrue("doc with id [" + value + "] is missing", getResponse.isExists()); if (sourceEnabled) { assertTrue((getResponse.getSource().containsKey("f"))); assertThat(getResponse.getSource().get("f"), 
equalTo(value)); } } }
apache-2.0
jknguyen/josephknguyen-selenium
java/client/test/org/openqa/selenium/SvgDocumentTest.java
2676
/* Copyright 2007-2012 Selenium committers Portions copyright 2011-2012 Software Freedom Conservancy Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.openqa.selenium; import org.junit.Test; import org.openqa.selenium.testing.Ignore; import org.openqa.selenium.testing.JUnit4TestBase; import static org.junit.Assert.assertEquals; import static org.junit.Assume.assumeFalse; import static org.openqa.selenium.testing.Ignore.Driver.CHROME; import static org.openqa.selenium.testing.Ignore.Driver.HTMLUNIT; import static org.openqa.selenium.testing.Ignore.Driver.OPERA; import static org.openqa.selenium.testing.Ignore.Driver.OPERA_MOBILE; import static org.openqa.selenium.testing.Ignore.Driver.SAFARI; import static org.openqa.selenium.testing.TestUtilities.getFirefoxVersion; import static org.openqa.selenium.testing.TestUtilities.isFirefox; import static org.openqa.selenium.testing.TestUtilities.isOldIe; @Ignore(value = {HTMLUNIT, OPERA, OPERA_MOBILE, SAFARI}, reason = "HtmlUnit: SVG interaction is only implemented in rendered browsers;" + "Safari: SafariDriver cannot manipulate SVG documents") public class SvgDocumentTest extends JUnit4TestBase { @Test @Ignore(value = CHROME, reason = "chromedriver needs to update atoms for latest SVG support") public void testClickOnSvgElement() { assumeFalse("IE version < 9 doesn't support SVG", isOldIe(driver)); assumeFalse("Firefox < 21 fails this test", isFirefox(driver) && (getFirefoxVersion(driver) < 21)); driver.get(pages.svgTestPage); WebElement rect = 
driver.findElement(By.id("rect")); assertEquals("blue", rect.getAttribute("fill")); rect.click(); assertEquals("green", rect.getAttribute("fill")); } @Test public void testExecuteScriptInSvgDocument() { assumeFalse("IE version < 9 doesn't support SVG", isOldIe(driver)); driver.get(pages.svgTestPage); WebElement rect = driver.findElement(By.id("rect")); assertEquals("blue", rect.getAttribute("fill")); ((JavascriptExecutor) driver).executeScript("document.getElementById('rect').setAttribute('fill', 'yellow');"); assertEquals("yellow", rect.getAttribute("fill")); } }
apache-2.0
paolodenti/openhab
bundles/binding/org.openhab.binding.tinkerforge/src/main/java/org/openhab/binding/tinkerforge/internal/model/impl/LaserRangeFinderDistanceImpl.java
35573
/** * Copyright (c) 2010-2016, openHAB.org and others. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ /** */ package org.openhab.binding.tinkerforge.internal.model.impl; import java.lang.reflect.InvocationTargetException; import java.math.BigDecimal; import java.util.concurrent.atomic.AtomicBoolean; import org.eclipse.emf.common.notify.Notification; import org.eclipse.emf.common.notify.NotificationChain; import org.eclipse.emf.common.util.EList; import org.eclipse.emf.ecore.EClass; import org.eclipse.emf.ecore.InternalEObject; import org.eclipse.emf.ecore.impl.ENotificationImpl; import org.eclipse.emf.ecore.impl.MinimalEObjectImpl; import org.eclipse.emf.ecore.util.EcoreUtil; import org.openhab.binding.tinkerforge.internal.LoggerConstants; import org.openhab.binding.tinkerforge.internal.TinkerforgeErrorHandler; import org.openhab.binding.tinkerforge.internal.model.CallbackListener; import org.openhab.binding.tinkerforge.internal.model.LaserRangeFinderDistance; import org.openhab.binding.tinkerforge.internal.model.MBrickletLaserRangeFinder; import org.openhab.binding.tinkerforge.internal.model.MSensor; import org.openhab.binding.tinkerforge.internal.model.MSubDeviceHolder; import org.openhab.binding.tinkerforge.internal.model.MTFConfigConsumer; import org.openhab.binding.tinkerforge.internal.model.ModelPackage; import org.openhab.binding.tinkerforge.internal.model.TFBaseConfiguration; import org.openhab.binding.tinkerforge.internal.tools.Tools; import org.openhab.binding.tinkerforge.internal.types.DecimalValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.tinkerforge.BrickletLaserRangeFinder; import com.tinkerforge.NotConnectedException; import com.tinkerforge.TimeoutException; /** * <!-- begin-user-doc --> * An implementation of the model object 
'<em><b>Laser Range Finder Distance</b></em>'. * <!-- end-user-doc --> * <p> * The following features are implemented: * <ul> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getLogger <em>Logger</em> * }</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getUid <em>Uid</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#isPoll <em>Poll</em>} * </li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getEnabledA * <em>Enabled A</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getSubId <em>Sub Id</em>} * </li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getMbrick <em>Mbrick</em> * }</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getTfConfig * <em>Tf Config</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getSensorValue * <em>Sensor Value</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getCallbackPeriod * <em>Callback Period</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getDeviceType * <em>Device Type</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.LaserRangeFinderDistanceImpl#getThreshold * <em>Threshold</em>}</li> * </ul> * </p> * * @generated */ public class LaserRangeFinderDistanceImpl extends MinimalEObjectImpl.Container implements LaserRangeFinderDistance { /** * The default value of the '{@link #getLogger() <em>Logger</em>}' attribute. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getLogger() * @generated * @ordered */ protected static final Logger LOGGER_EDEFAULT = null; /** * The cached value of the '{@link #getLogger() <em>Logger</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getLogger() * @generated * @ordered */ protected Logger logger = LOGGER_EDEFAULT; /** * The default value of the '{@link #getUid() <em>Uid</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getUid() * @generated * @ordered */ protected static final String UID_EDEFAULT = null; /** * The cached value of the '{@link #getUid() <em>Uid</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getUid() * @generated * @ordered */ protected String uid = UID_EDEFAULT; /** * The default value of the '{@link #isPoll() <em>Poll</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #isPoll() * @generated * @ordered */ protected static final boolean POLL_EDEFAULT = true; /** * The cached value of the '{@link #isPoll() <em>Poll</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #isPoll() * @generated * @ordered */ protected boolean poll = POLL_EDEFAULT; /** * The default value of the '{@link #getEnabledA() <em>Enabled A</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getEnabledA() * @generated * @ordered */ protected static final AtomicBoolean ENABLED_A_EDEFAULT = null; /** * The cached value of the '{@link #getEnabledA() <em>Enabled A</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getEnabledA() * @generated * @ordered */ protected AtomicBoolean enabledA = ENABLED_A_EDEFAULT; /** * The default value of the '{@link #getSubId() <em>Sub Id</em>}' attribute. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSubId() * @generated * @ordered */ protected static final String SUB_ID_EDEFAULT = null; /** * The cached value of the '{@link #getSubId() <em>Sub Id</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSubId() * @generated * @ordered */ protected String subId = SUB_ID_EDEFAULT; /** * The cached value of the '{@link #getTfConfig() <em>Tf Config</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getTfConfig() * @generated * @ordered */ protected TFBaseConfiguration tfConfig; /** * The cached value of the '{@link #getSensorValue() <em>Sensor Value</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSensorValue() * @generated * @ordered */ protected DecimalValue sensorValue; /** * The default value of the '{@link #getCallbackPeriod() <em>Callback Period</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getCallbackPeriod() * @generated * @ordered */ protected static final long CALLBACK_PERIOD_EDEFAULT = 1000L; /** * The cached value of the '{@link #getCallbackPeriod() <em>Callback Period</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getCallbackPeriod() * @generated * @ordered */ protected long callbackPeriod = CALLBACK_PERIOD_EDEFAULT; /** * The default value of the '{@link #getDeviceType() <em>Device Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getDeviceType() * @generated * @ordered */ protected static final String DEVICE_TYPE_EDEFAULT = "laser_range_finder_distance"; /** * The cached value of the '{@link #getDeviceType() <em>Device Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getDeviceType() * @generated * @ordered */ protected String deviceType = DEVICE_TYPE_EDEFAULT; /** * The default value of the '{@link #getThreshold() <em>Threshold</em>}' attribute. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getThreshold() * @generated * @ordered */ protected static final BigDecimal THRESHOLD_EDEFAULT = new BigDecimal("0"); /** * The cached value of the '{@link #getThreshold() <em>Threshold</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getThreshold() * @generated * @ordered */ protected BigDecimal threshold = THRESHOLD_EDEFAULT; private BrickletLaserRangeFinder tinkerforgeDevice; private DistanceListener listener; /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ protected LaserRangeFinderDistanceImpl() { super(); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override protected EClass eStaticClass() { return ModelPackage.Literals.LASER_RANGE_FINDER_DISTANCE; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Logger getLogger() { return logger; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setLogger(Logger newLogger) { Logger oldLogger = logger; logger = newLogger; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__LOGGER, oldLogger, logger)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getUid() { return uid; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setUid(String newUid) { String oldUid = uid; uid = newUid; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__UID, oldUid, uid)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public boolean isPoll() { return poll; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setPoll(boolean newPoll) { boolean oldPoll = poll; poll = newPoll; if 
(eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__POLL, oldPoll, poll)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public AtomicBoolean getEnabledA() { return enabledA; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setEnabledA(AtomicBoolean newEnabledA) { AtomicBoolean oldEnabledA = enabledA; enabledA = newEnabledA; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__ENABLED_A, oldEnabledA, enabledA)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getSubId() { return subId; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setSubId(String newSubId) { String oldSubId = subId; subId = newSubId; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__SUB_ID, oldSubId, subId)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public MBrickletLaserRangeFinder getMbrick() { if (eContainerFeatureID() != ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK) { return null; } return (MBrickletLaserRangeFinder) eContainer(); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ public NotificationChain basicSetMbrick(MBrickletLaserRangeFinder newMbrick, NotificationChain msgs) { msgs = eBasicSetContainer((InternalEObject) newMbrick, ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK, msgs); return msgs; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setMbrick(MBrickletLaserRangeFinder newMbrick) { if (newMbrick != eInternalContainer() || (eContainerFeatureID() != ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK && newMbrick != null)) { if (EcoreUtil.isAncestor(this, 
newMbrick)) { throw new IllegalArgumentException("Recursive containment not allowed for " + toString()); } NotificationChain msgs = null; if (eInternalContainer() != null) { msgs = eBasicRemoveFromContainer(msgs); } if (newMbrick != null) { msgs = ((InternalEObject) newMbrick).eInverseAdd(this, ModelPackage.MSUB_DEVICE_HOLDER__MSUBDEVICES, MSubDeviceHolder.class, msgs); } msgs = basicSetMbrick(newMbrick, msgs); if (msgs != null) { msgs.dispatch(); } } else if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK, newMbrick, newMbrick)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public DecimalValue getSensorValue() { return sensorValue; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setSensorValue(DecimalValue newSensorValue) { DecimalValue oldSensorValue = sensorValue; sensorValue = newSensorValue; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE, oldSensorValue, sensorValue)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public TFBaseConfiguration getTfConfig() { return tfConfig; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ public NotificationChain basicSetTfConfig(TFBaseConfiguration newTfConfig, NotificationChain msgs) { TFBaseConfiguration oldTfConfig = tfConfig; tfConfig = newTfConfig; if (eNotificationRequired()) { ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG, oldTfConfig, newTfConfig); if (msgs == null) { msgs = notification; } else { msgs.add(notification); } } return msgs; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setTfConfig(TFBaseConfiguration newTfConfig) { if (newTfConfig != tfConfig) { 
NotificationChain msgs = null; if (tfConfig != null) { msgs = ((InternalEObject) tfConfig).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG, null, msgs); } if (newTfConfig != null) { msgs = ((InternalEObject) newTfConfig).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG, null, msgs); } msgs = basicSetTfConfig(newTfConfig, msgs); if (msgs != null) { msgs.dispatch(); } } else if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG, newTfConfig, newTfConfig)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public long getCallbackPeriod() { return callbackPeriod; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setCallbackPeriod(long newCallbackPeriod) { long oldCallbackPeriod = callbackPeriod; callbackPeriod = newCallbackPeriod; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD, oldCallbackPeriod, callbackPeriod)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getDeviceType() { return deviceType; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public BigDecimal getThreshold() { return threshold; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setThreshold(BigDecimal newThreshold) { BigDecimal oldThreshold = threshold; threshold = newThreshold; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.LASER_RANGE_FINDER_DISTANCE__THRESHOLD, oldThreshold, threshold)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated NOT */ @Override public void fetchSensorValue() { try { int distance = tinkerforgeDevice.getDistance(); 
DecimalValue value = Tools.calculate(distance); setSensorValue(value); } catch (TimeoutException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_TIMEOUT_EXCEPTION, e); } catch (NotConnectedException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_NOT_CONNECTION_EXCEPTION, e); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated NOT */ @Override public void init() { setEnabledA(new AtomicBoolean()); logger = LoggerFactory.getLogger(LaserRangeFinderDistanceImpl.class); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated NOT */ @Override public void enable() { if (tfConfig != null) { if (tfConfig.eIsSet(tfConfig.eClass().getEStructuralFeature("threshold"))) { logger.debug("threshold {}", tfConfig.getThreshold()); setThreshold(tfConfig.getThreshold()); } if (tfConfig.eIsSet(tfConfig.eClass().getEStructuralFeature("callbackPeriod"))) { logger.debug("callbackPeriod {}", tfConfig.getCallbackPeriod()); setCallbackPeriod(tfConfig.getCallbackPeriod()); } } try { tinkerforgeDevice = getMbrick().getTinkerforgeDevice(); tinkerforgeDevice.setVelocityCallbackPeriod(getCallbackPeriod()); listener = new DistanceListener(); tinkerforgeDevice.addDistanceListener(listener); fetchSensorValue(); } catch (TimeoutException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_TIMEOUT_EXCEPTION, e); } catch (NotConnectedException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_NOT_CONNECTION_EXCEPTION, e); } } private class DistanceListener implements BrickletLaserRangeFinder.DistanceListener { @Override public void distance(int distance) { DecimalValue value = Tools.calculate(distance); logger.trace("{} got new value {}", LoggerConstants.TFMODELUPDATE, value); if (value.compareTo(getSensorValue(), getThreshold()) != 0) { logger.trace("{} setting new value {}", LoggerConstants.TFMODELUPDATE, value); setSensorValue(value); } else { logger.trace("{} 
omitting new value {}", LoggerConstants.TFMODELUPDATE, value); } } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated NOT */ @Override public void disable() { if (listener != null) { tinkerforgeDevice.removeDistanceListener(listener); } tinkerforgeDevice = null; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eInverseAdd(InternalEObject otherEnd, int featureID, NotificationChain msgs) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: if (eInternalContainer() != null) { msgs = eBasicRemoveFromContainer(msgs); } return basicSetMbrick((MBrickletLaserRangeFinder) otherEnd, msgs); } return super.eInverseAdd(otherEnd, featureID, msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: return basicSetMbrick(null, msgs); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: return basicSetTfConfig(null, msgs); } return super.eInverseRemove(otherEnd, featureID, msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eBasicRemoveFromContainerFeature(NotificationChain msgs) { switch (eContainerFeatureID()) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: return eInternalContainer().eInverseRemove(this, ModelPackage.MSUB_DEVICE_HOLDER__MSUBDEVICES, MSubDeviceHolder.class, msgs); } return super.eBasicRemoveFromContainerFeature(msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Object eGet(int featureID, boolean resolve, boolean coreType) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__LOGGER: return getLogger(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__UID: return getUid(); case 
ModelPackage.LASER_RANGE_FINDER_DISTANCE__POLL: return isPoll(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__ENABLED_A: return getEnabledA(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SUB_ID: return getSubId(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: return getMbrick(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: return getTfConfig(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE: return getSensorValue(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD: return getCallbackPeriod(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__DEVICE_TYPE: return getDeviceType(); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__THRESHOLD: return getThreshold(); } return super.eGet(featureID, resolve, coreType); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void eSet(int featureID, Object newValue) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__LOGGER: setLogger((Logger) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__UID: setUid((String) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__POLL: setPoll((Boolean) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__ENABLED_A: setEnabledA((AtomicBoolean) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SUB_ID: setSubId((String) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: setMbrick((MBrickletLaserRangeFinder) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: setTfConfig((TFBaseConfiguration) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE: setSensorValue((DecimalValue) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD: setCallbackPeriod((Long) newValue); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__THRESHOLD: setThreshold((BigDecimal) newValue); return; } super.eSet(featureID, newValue); } /** * <!-- begin-user-doc 
--> * <!-- end-user-doc --> * * @generated */ @Override public void eUnset(int featureID) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__LOGGER: setLogger(LOGGER_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__UID: setUid(UID_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__POLL: setPoll(POLL_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__ENABLED_A: setEnabledA(ENABLED_A_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SUB_ID: setSubId(SUB_ID_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: setMbrick((MBrickletLaserRangeFinder) null); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: setTfConfig((TFBaseConfiguration) null); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE: setSensorValue((DecimalValue) null); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD: setCallbackPeriod(CALLBACK_PERIOD_EDEFAULT); return; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__THRESHOLD: setThreshold(THRESHOLD_EDEFAULT); return; } super.eUnset(featureID); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public boolean eIsSet(int featureID) { switch (featureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__LOGGER: return LOGGER_EDEFAULT == null ? logger != null : !LOGGER_EDEFAULT.equals(logger); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__UID: return UID_EDEFAULT == null ? uid != null : !UID_EDEFAULT.equals(uid); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__POLL: return poll != POLL_EDEFAULT; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__ENABLED_A: return ENABLED_A_EDEFAULT == null ? enabledA != null : !ENABLED_A_EDEFAULT.equals(enabledA); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SUB_ID: return SUB_ID_EDEFAULT == null ? 
subId != null : !SUB_ID_EDEFAULT.equals(subId); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__MBRICK: return getMbrick() != null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: return tfConfig != null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE: return sensorValue != null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD: return callbackPeriod != CALLBACK_PERIOD_EDEFAULT; case ModelPackage.LASER_RANGE_FINDER_DISTANCE__DEVICE_TYPE: return DEVICE_TYPE_EDEFAULT == null ? deviceType != null : !DEVICE_TYPE_EDEFAULT.equals(deviceType); case ModelPackage.LASER_RANGE_FINDER_DISTANCE__THRESHOLD: return THRESHOLD_EDEFAULT == null ? threshold != null : !THRESHOLD_EDEFAULT.equals(threshold); } return super.eIsSet(featureID); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eBaseStructuralFeatureID(int derivedFeatureID, Class<?> baseClass) { if (baseClass == MTFConfigConsumer.class) { switch (derivedFeatureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG: return ModelPackage.MTF_CONFIG_CONSUMER__TF_CONFIG; default: return -1; } } if (baseClass == MSensor.class) { switch (derivedFeatureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE: return ModelPackage.MSENSOR__SENSOR_VALUE; default: return -1; } } if (baseClass == CallbackListener.class) { switch (derivedFeatureID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD: return ModelPackage.CALLBACK_LISTENER__CALLBACK_PERIOD; default: return -1; } } return super.eBaseStructuralFeatureID(derivedFeatureID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eDerivedStructuralFeatureID(int baseFeatureID, Class<?> baseClass) { if (baseClass == MTFConfigConsumer.class) { switch (baseFeatureID) { case ModelPackage.MTF_CONFIG_CONSUMER__TF_CONFIG: return ModelPackage.LASER_RANGE_FINDER_DISTANCE__TF_CONFIG; default: return -1; } } if (baseClass == 
MSensor.class) { switch (baseFeatureID) { case ModelPackage.MSENSOR__SENSOR_VALUE: return ModelPackage.LASER_RANGE_FINDER_DISTANCE__SENSOR_VALUE; default: return -1; } } if (baseClass == CallbackListener.class) { switch (baseFeatureID) { case ModelPackage.CALLBACK_LISTENER__CALLBACK_PERIOD: return ModelPackage.LASER_RANGE_FINDER_DISTANCE__CALLBACK_PERIOD; default: return -1; } } return super.eDerivedStructuralFeatureID(baseFeatureID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eDerivedOperationID(int baseOperationID, Class<?> baseClass) { if (baseClass == MTFConfigConsumer.class) { switch (baseOperationID) { default: return -1; } } if (baseClass == MSensor.class) { switch (baseOperationID) { case ModelPackage.MSENSOR___FETCH_SENSOR_VALUE: return ModelPackage.LASER_RANGE_FINDER_DISTANCE___FETCH_SENSOR_VALUE; default: return -1; } } if (baseClass == CallbackListener.class) { switch (baseOperationID) { default: return -1; } } return super.eDerivedOperationID(baseOperationID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Object eInvoke(int operationID, EList<?> arguments) throws InvocationTargetException { switch (operationID) { case ModelPackage.LASER_RANGE_FINDER_DISTANCE___FETCH_SENSOR_VALUE: fetchSensorValue(); return null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE___INIT: init(); return null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE___ENABLE: enable(); return null; case ModelPackage.LASER_RANGE_FINDER_DISTANCE___DISABLE: disable(); return null; } return super.eInvoke(operationID, arguments); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String toString() { if (eIsProxy()) { return super.toString(); } StringBuffer result = new StringBuffer(super.toString()); result.append(" (logger: "); result.append(logger); result.append(", uid: "); result.append(uid); result.append(", poll: "); 
result.append(poll); result.append(", enabledA: "); result.append(enabledA); result.append(", subId: "); result.append(subId); result.append(", sensorValue: "); result.append(sensorValue); result.append(", callbackPeriod: "); result.append(callbackPeriod); result.append(", deviceType: "); result.append(deviceType); result.append(", threshold: "); result.append(threshold); result.append(')'); return result.toString(); } } // LaserRangeFinderDistanceImpl
epl-1.0
jialinsun/cat
cat-home/src/main/java/com/dianping/cat/report/page/overload/task/OverloadReport.java
502
package com.dianping.cat.report.page.overload.task;

import com.dianping.cat.core.dal.DailyReport;

/**
 * A {@link DailyReport} that additionally carries a report type discriminator and a report
 * length metric. Plain data holder; the semantics of type/length values are defined by the
 * callers (not evident from this class).
 */
public class OverloadReport extends DailyReport {

    // discriminator for the kind of report; value set externally — meaning defined by callers
    private int m_type;

    // size metric of the report; unit not evident here — presumably serialized content length
    private double m_length;

    public double getReportLength() {
        return m_length;
    }

    public void setReportLength(double reportLength) {
        m_length = reportLength;
    }

    public int getReportType() {
        return m_type;
    }

    public void setReportType(int reportType) {
        m_type = reportType;
    }
}
apache-2.0
smartan/lucene
src/main/java/org/apache/lucene/util/MathUtil.java
4820
package org.apache.lucene.util; /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.math.BigInteger; /** * Math static utility methods. */ public final class MathUtil { // No instance: private MathUtil() { } /** * Returns {@code x <= 0 ? 0 : Math.floor(Math.log(x) / Math.log(base))} * @param base must be {@code > 1} */ public static int log(long x, int base) { if (base <= 1) { throw new IllegalArgumentException("base must be > 1"); } int ret = 0; while (x >= base) { x /= base; ret++; } return ret; } /** * Calculates logarithm in a given base with doubles. */ public static double log(double base, double x) { return Math.log(x) / Math.log(base); } /** Return the greatest common divisor of <code>a</code> and <code>b</code>, * consistently with {@link BigInteger#gcd(BigInteger)}. * <p><b>NOTE</b>: A greatest common divisor must be positive, but * <code>2^64</code> cannot be expressed as a long although it * is the GCD of {@link Long#MIN_VALUE} and <code>0</code> and the GCD of * {@link Long#MIN_VALUE} and {@link Long#MIN_VALUE}. So in these 2 cases, * and only them, this method will return {@link Long#MIN_VALUE}. 
*/ // see http://en.wikipedia.org/wiki/Binary_GCD_algorithm#Iterative_version_in_C.2B.2B_using_ctz_.28count_trailing_zeros.29 public static long gcd(long a, long b) { a = Math.abs(a); b = Math.abs(b); if (a == 0) { return b; } else if (b == 0) { return a; } final int commonTrailingZeros = Long.numberOfTrailingZeros(a | b); a >>>= Long.numberOfTrailingZeros(a); while (true) { b >>>= Long.numberOfTrailingZeros(b); if (a == b) { break; } else if (a > b || a == Long.MIN_VALUE) { // MIN_VALUE is treated as 2^64 final long tmp = a; a = b; b = tmp; } if (a == 1) { break; } b -= a; } return a << commonTrailingZeros; } /** * Calculates inverse hyperbolic sine of a {@code double} value. * <p> * Special cases: * <ul> * <li>If the argument is NaN, then the result is NaN. * <li>If the argument is zero, then the result is a zero with the same sign as the argument. * <li>If the argument is infinite, then the result is infinity with the same sign as the argument. * </ul> */ public static double asinh(double a) { final double sign; // check the sign bit of the raw representation to handle -0 if (Double.doubleToRawLongBits(a) < 0) { a = Math.abs(a); sign = -1.0d; } else { sign = 1.0d; } return sign * Math.log(Math.sqrt(a * a + 1.0d) + a); } /** * Calculates inverse hyperbolic cosine of a {@code double} value. * <p> * Special cases: * <ul> * <li>If the argument is NaN, then the result is NaN. * <li>If the argument is +1, then the result is a zero. * <li>If the argument is positive infinity, then the result is positive infinity. * <li>If the argument is less than 1, then the result is NaN. * </ul> */ public static double acosh(double a) { return Math.log(Math.sqrt(a * a - 1.0d) + a); } /** * Calculates inverse hyperbolic tangent of a {@code double} value. * <p> * Special cases: * <ul> * <li>If the argument is NaN, then the result is NaN. * <li>If the argument is zero, then the result is a zero with the same sign as the argument. 
* <li>If the argument is +1, then the result is positive infinity. * <li>If the argument is -1, then the result is negative infinity. * <li>If the argument's absolute value is greater than 1, then the result is NaN. * </ul> */ public static double atanh(double a) { final double mult; // check the sign bit of the raw representation to handle -0 if (Double.doubleToRawLongBits(a) < 0) { a = Math.abs(a); mult = -0.5d; } else { mult = 0.5d; } return mult * Math.log((1.0d + a) / (1.0d - a)); } }
apache-2.0
mahaliachante/aws-sdk-java
aws-java-sdk-rds/src/main/java/com/amazonaws/services/rds/model/CreateDBInstanceReadReplicaRequest.java
58153
/*
 * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.rds.model;

import java.io.Serializable;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * Container for the parameters to the
 * {@link com.amazonaws.services.rds.AmazonRDS#createDBInstanceReadReplica(CreateDBInstanceReadReplicaRequest)
 * CreateDBInstanceReadReplica operation}.
 * <p>
 * Creates a DB instance for a DB instance running MySQL or PostgreSQL that
 * acts as a Read Replica of a source DB instance.
 * </p>
 * <p>
 * All Read Replica DB instances are created as Single-AZ deployments with
 * backups disabled. All other DB instance attributes (including DB security
 * groups and DB parameter groups) are inherited from the source DB instance,
 * except as specified below.
 * </p>
 * <p>
 * <b>IMPORTANT:</b> The source DB instance must have backup retention
 * enabled.
 * </p>
 *
 * @see com.amazonaws.services.rds.AmazonRDS#createDBInstanceReadReplica(CreateDBInstanceReadReplicaRequest)
 */
public class CreateDBInstanceReadReplicaRequest extends AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * The DB instance identifier of the Read Replica. This identifier is the
     * unique key that identifies a DB instance and is stored as a lowercase
     * string.
     */
    private String dBInstanceIdentifier;

    /**
     * The identifier of the DB instance that will act as the source for the
     * Read Replica. Each DB instance can have up to five Read Replicas. The
     * source must have automatic backups enabled (backup retention period
     * greater than 0); a same-region source is named by DB instance
     * identifier, a cross-region source by DB instance ARN. A MySQL source
     * must be running MySQL 5.6; a PostgreSQL source must be running
     * PostgreSQL 9.3.5.
     */
    private String sourceDBInstanceIdentifier;

    /**
     * The compute and memory capacity of the Read Replica, for example
     * <code>db.m3.medium</code>. Default: inherits from the source DB
     * instance.
     */
    private String dBInstanceClass;

    /**
     * The Amazon EC2 Availability Zone that the Read Replica will be created
     * in, for example <code>us-east-1d</code>. Default: a random,
     * system-chosen Availability Zone in the endpoint's region.
     */
    private String availabilityZone;

    /**
     * The port number that the DB instance uses for connections. Valid
     * values: <code>1150-65535</code>. Default: inherits from the source DB
     * instance.
     */
    private Integer port;

    /**
     * Indicates that minor engine upgrades will be applied automatically to
     * the Read Replica during the maintenance window. Default: inherits from
     * the source DB instance.
     */
    private Boolean autoMinorVersionUpgrade;

    /**
     * The amount of Provisioned IOPS (input/output operations per second) to
     * be initially allocated for the DB instance.
     */
    private Integer iops;

    /**
     * The option group the DB instance will be associated with. If omitted,
     * the default option group for the engine specified will be used.
     */
    private String optionGroupName;

    /**
     * Specifies the accessibility options for the DB instance: true for an
     * Internet-facing instance with a publicly resolvable DNS name that
     * resolves to a public IP address, false for an internal instance with a
     * DNS name that resolves to a private IP address. Default: true in a
     * default VPC, false in a VPC; see
     * {@link #setPubliclyAccessible(Boolean)} for details.
     */
    private Boolean publiclyAccessible;

    /**
     * A list of tags.
     */
    private com.amazonaws.internal.ListWithAutoConstructFlag<Tag> tags;

    /**
     * Specifies a DB subnet group for the DB instance. The new DB instance
     * will be created in the VPC associated with the DB subnet group; if no
     * DB subnet group is specified, the new DB instance is not created in a
     * VPC. Can only be specified for a source DB instance in another region;
     * see {@link #setDBSubnetGroupName(String)} for details.
     */
    private String dBSubnetGroupName;

    /**
     * Specifies the storage type to be associated with the Read Replica.
     * Valid values: <code>standard | gp2 | io1</code>. <code>io1</code>
     * requires a value for the <code>Iops</code> parameter. Default:
     * <code>io1</code> if the <code>Iops</code> parameter is specified,
     * otherwise <code>standard</code>.
     */
    private String storageType;

    /**
     * Default constructor for a new CreateDBInstanceReadReplicaRequest
     * object. Callers should use the setter or fluent setter (with...)
     * methods to initialize this object after creating it.
     */
    public CreateDBInstanceReadReplicaRequest() {}

    /**
     * Constructs a new CreateDBInstanceReadReplicaRequest object.
     * Callers should use the setter or fluent setter (with...) methods to
     * initialize any additional object members.
     *
     * @param dBInstanceIdentifier The DB instance identifier of the Read
     * Replica. This identifier is the unique key that identifies a DB
     * instance. This parameter is stored as a lowercase string.
     * @param sourceDBInstanceIdentifier The identifier of the DB instance
     * that will act as the source for the Read Replica. Each DB instance can
     * have up to five Read Replicas.
<p>Constraints: <ul> <li>Must be the * identifier of an existing DB instance.</li> <li>Can specify a DB * instance that is a MySQL Read Replica only if the source is running * MySQL 5.6.</li> <li>Can specify a DB instance that is a PostgreSQL * Read Replica only if the source is running PostgreSQL 9.3.5.</li> * <li>The specified DB instance must have automatic backups enabled, its * backup retention period must be greater than 0.</li> <li>If the source * DB instance is in the same region as the Read Replica, specify a valid * DB instance identifier.</li> <li>If the source DB instance is in a * different region than the Read Replica, specify a valid DB instance * ARN. For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> */ public CreateDBInstanceReadReplicaRequest(String dBInstanceIdentifier, String sourceDBInstanceIdentifier) { setDBInstanceIdentifier(dBInstanceIdentifier); setSourceDBInstanceIdentifier(sourceDBInstanceIdentifier); } /** * The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. * * @return The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. */ public String getDBInstanceIdentifier() { return dBInstanceIdentifier; } /** * The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. * * @param dBInstanceIdentifier The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. 
*/ public void setDBInstanceIdentifier(String dBInstanceIdentifier) { this.dBInstanceIdentifier = dBInstanceIdentifier; } /** * The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. * <p> * Returns a reference to this object so that method calls can be chained together. * * @param dBInstanceIdentifier The DB instance identifier of the Read Replica. This identifier is the * unique key that identifies a DB instance. This parameter is stored as * a lowercase string. * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withDBInstanceIdentifier(String dBInstanceIdentifier) { this.dBInstanceIdentifier = dBInstanceIdentifier; return this; } /** * The identifier of the DB instance that will act as the source for the * Read Replica. Each DB instance can have up to five Read Replicas. * <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> * * @return The identifier of the DB instance that will act as the source for the * Read Replica. 
Each DB instance can have up to five Read Replicas. * <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> */ public String getSourceDBInstanceIdentifier() { return sourceDBInstanceIdentifier; } /** * The identifier of the DB instance that will act as the source for the * Read Replica. Each DB instance can have up to five Read Replicas. * <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. 
For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> * * @param sourceDBInstanceIdentifier The identifier of the DB instance that will act as the source for the * Read Replica. Each DB instance can have up to five Read Replicas. * <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> */ public void setSourceDBInstanceIdentifier(String sourceDBInstanceIdentifier) { this.sourceDBInstanceIdentifier = sourceDBInstanceIdentifier; } /** * The identifier of the DB instance that will act as the source for the * Read Replica. Each DB instance can have up to five Read Replicas. 
* <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> * <p> * Returns a reference to this object so that method calls can be chained together. * * @param sourceDBInstanceIdentifier The identifier of the DB instance that will act as the source for the * Read Replica. Each DB instance can have up to five Read Replicas. * <p>Constraints: <ul> <li>Must be the identifier of an existing DB * instance.</li> <li>Can specify a DB instance that is a MySQL Read * Replica only if the source is running MySQL 5.6.</li> <li>Can specify * a DB instance that is a PostgreSQL Read Replica only if the source is * running PostgreSQL 9.3.5.</li> <li>The specified DB instance must have * automatic backups enabled, its backup retention period must be greater * than 0.</li> <li>If the source DB instance is in the same region as * the Read Replica, specify a valid DB instance identifier.</li> <li>If * the source DB instance is in a different region than the Read Replica, * specify a valid DB instance ARN. 
For more information, go to <a * href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN"> * Constructing a Amazon RDS Amazon Resource Name (ARN)</a>.</li> </ul> * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withSourceDBInstanceIdentifier(String sourceDBInstanceIdentifier) { this.sourceDBInstanceIdentifier = sourceDBInstanceIdentifier; return this; } /** * The compute and memory capacity of the Read Replica. <p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. * * @return The compute and memory capacity of the Read Replica. <p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. */ public String getDBInstanceClass() { return dBInstanceClass; } /** * The compute and memory capacity of the Read Replica. <p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. * * @param dBInstanceClass The compute and memory capacity of the Read Replica. 
<p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. */ public void setDBInstanceClass(String dBInstanceClass) { this.dBInstanceClass = dBInstanceClass; } /** * The compute and memory capacity of the Read Replica. <p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. * <p> * Returns a reference to this object so that method calls can be chained together. * * @param dBInstanceClass The compute and memory capacity of the Read Replica. <p> Valid Values: * <code>db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | * db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | * db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | * db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | * db.t2.micro | db.t2.small | db.t2.medium</code> <p>Default: Inherits * from the source DB instance. * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withDBInstanceClass(String dBInstanceClass) { this.dBInstanceClass = dBInstanceClass; return this; } /** * The Amazon EC2 Availability Zone that the Read Replica will be created * in. <p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> * * @return The Amazon EC2 Availability Zone that the Read Replica will be created * in. 
<p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> */ public String getAvailabilityZone() { return availabilityZone; } /** * The Amazon EC2 Availability Zone that the Read Replica will be created * in. <p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> * * @param availabilityZone The Amazon EC2 Availability Zone that the Read Replica will be created * in. <p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> */ public void setAvailabilityZone(String availabilityZone) { this.availabilityZone = availabilityZone; } /** * The Amazon EC2 Availability Zone that the Read Replica will be created * in. <p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> * <p> * Returns a reference to this object so that method calls can be chained together. * * @param availabilityZone The Amazon EC2 Availability Zone that the Read Replica will be created * in. <p> Default: A random, system-chosen Availability Zone in the * endpoint's region. <p> Example: <code>us-east-1d</code> * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withAvailabilityZone(String availabilityZone) { this.availabilityZone = availabilityZone; return this; } /** * The port number that the DB instance uses for connections. <p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> * * @return The port number that the DB instance uses for connections. <p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> */ public Integer getPort() { return port; } /** * The port number that the DB instance uses for connections. 
<p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> * * @param port The port number that the DB instance uses for connections. <p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> */ public void setPort(Integer port) { this.port = port; } /** * The port number that the DB instance uses for connections. <p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> * <p> * Returns a reference to this object so that method calls can be chained together. * * @param port The port number that the DB instance uses for connections. <p>Default: * Inherits from the source DB instance <p>Valid Values: * <code>1150-65535</code> * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withPort(Integer port) { this.port = port; return this; } /** * Indicates that minor engine upgrades will be applied automatically to * the Read Replica during the maintenance window. <p>Default: Inherits * from the source DB instance * * @return Indicates that minor engine upgrades will be applied automatically to * the Read Replica during the maintenance window. <p>Default: Inherits * from the source DB instance */ public Boolean isAutoMinorVersionUpgrade() { return autoMinorVersionUpgrade; } /** * Indicates that minor engine upgrades will be applied automatically to * the Read Replica during the maintenance window. <p>Default: Inherits * from the source DB instance * * @param autoMinorVersionUpgrade Indicates that minor engine upgrades will be applied automatically to * the Read Replica during the maintenance window. 
<p>Default: Inherits
 * from the source DB instance.
 */
    public void setAutoMinorVersionUpgrade(Boolean autoMinorVersionUpgrade) {
        this.autoMinorVersionUpgrade = autoMinorVersionUpgrade;
    }

    /**
     * Sets whether minor engine upgrades will be applied automatically to
     * the Read Replica during the maintenance window.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param autoMinorVersionUpgrade Indicates that minor engine upgrades
     *        will be applied automatically during the maintenance window.
     *        Default: inherits from the source DB instance.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withAutoMinorVersionUpgrade(Boolean autoMinorVersionUpgrade) {
        this.autoMinorVersionUpgrade = autoMinorVersionUpgrade;
        return this;
    }

    /**
     * Indicates whether minor engine upgrades will be applied automatically
     * to the Read Replica during the maintenance window.
     *
     * @return Whether minor engine upgrades are applied automatically.
     *         Default: inherits from the source DB instance.
     */
    public Boolean getAutoMinorVersionUpgrade() {
        return autoMinorVersionUpgrade;
    }

    /**
     * Returns the amount of Provisioned IOPS (input/output operations per
     * second) to be initially allocated for the DB instance.
     *
     * @return The amount of Provisioned IOPS to be initially allocated for
     *         the DB instance.
     */
    public Integer getIops() {
        return iops;
    }

    /**
     * Sets the amount of Provisioned IOPS (input/output operations per
     * second) to be initially allocated for the DB instance.
     *
     * @param iops The amount of Provisioned IOPS to be initially allocated
     *        for the DB instance.
     */
    public void setIops(Integer iops) {
        this.iops = iops;
    }

    /**
     * Sets the amount of Provisioned IOPS (input/output operations per
     * second) to be initially allocated for the DB instance.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param iops The amount of Provisioned IOPS to be initially allocated
     *        for the DB instance.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withIops(Integer iops) {
        this.iops = iops;
        return this;
    }

    /**
     * Returns the option group the DB instance will be associated with.
     *
     * @return The option group name. If omitted, the default option group
     *         for the engine specified will be used.
     */
    public String getOptionGroupName() {
        return optionGroupName;
    }

    /**
     * Sets the option group the DB instance will be associated with.
     *
     * @param optionGroupName The option group name. If omitted, the default
     *        option group for the engine specified will be used.
     */
    public void setOptionGroupName(String optionGroupName) {
        this.optionGroupName = optionGroupName;
    }

    /**
     * Sets the option group the DB instance will be associated with.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param optionGroupName The option group name. If omitted, the default
     *        option group for the engine specified will be used.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withOptionGroupName(String optionGroupName) {
        this.optionGroupName = optionGroupName;
        return this;
    }

    /**
     * Indicates the accessibility options for the DB instance. A value of
     * true specifies an Internet-facing instance with a publicly resolvable
     * DNS name, which resolves to a public IP address. A value of false
     * specifies an internal instance with a DNS name that resolves to a
     * private IP address.
     * <p>
     * Default: The default behavior varies depending on whether a VPC has
     * been requested or not: true for a default VPC, false for a VPC. If no
     * DB subnet group has been specified as part of the request and the
     * PubliclyAccessible value has not been set, the DB instance will be
     * publicly accessible; if a specific DB subnet group has been specified
     * and the value has not been set, the DB instance will be private.
     *
     * @return Whether the DB instance is publicly accessible.
     */
    public Boolean isPubliclyAccessible() {
        return publiclyAccessible;
    }

    /**
     * Sets the accessibility options for the DB instance. See
     * {@link #isPubliclyAccessible()} for the meaning of the value and the
     * default behavior.
     *
     * @param publiclyAccessible true for an Internet-facing instance with a
     *        publicly resolvable DNS name, false for an internal instance
     *        with a DNS name that resolves to a private IP address.
     */
    public void setPubliclyAccessible(Boolean publiclyAccessible) {
        this.publiclyAccessible = publiclyAccessible;
    }

    /**
     * Sets the accessibility options for the DB instance. See
     * {@link #isPubliclyAccessible()} for the meaning of the value and the
     * default behavior.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param publiclyAccessible true for an Internet-facing instance, false
     *        for an internal instance.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withPubliclyAccessible(Boolean publiclyAccessible) {
        this.publiclyAccessible = publiclyAccessible;
        return this;
    }

    /**
     * Indicates the accessibility options for the DB instance.
     *
     * @return Whether the DB instance is publicly accessible. If no DB
     *         subnet group has been specified as part of the request and the
     *         PubliclyAccessible value has not been set, the DB instance
     *         will be publicly accessible. If a specific DB subnet group has
     *         been specified as part of the request and the
     *         PubliclyAccessible value has not been
     *         set, the DB instance will be private.
*/
    public Boolean getPubliclyAccessible() {
        return publiclyAccessible;
    }

    /**
     * Returns the list of tags, lazily creating an empty auto-construct
     * list on first access.
     *
     * @return A list of tags.
     */
    public java.util.List<Tag> getTags() {
        if (tags == null) {
            tags = new com.amazonaws.internal.ListWithAutoConstructFlag<Tag>();
            tags.setAutoConstruct(true);
        }
        return tags;
    }

    /**
     * Sets the list of tags, copying the supplied collection.
     *
     * @param tags A list of tags.
     */
    public void setTags(java.util.Collection<Tag> tags) {
        if (tags == null) {
            this.tags = null;
            return;
        }
        com.amazonaws.internal.ListWithAutoConstructFlag<Tag> tagsCopy = new com.amazonaws.internal.ListWithAutoConstructFlag<Tag>(tags.size());
        tagsCopy.addAll(tags);
        this.tags = tagsCopy;
    }

    /**
     * Adds the given tags to the request.
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if
     * any). Use {@link #setTags(java.util.Collection)} or {@link
     * #withTags(java.util.Collection)} if you want to override the existing
     * values.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param tags A list of tags.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withTags(Tag... tags) {
        if (getTags() == null) setTags(new java.util.ArrayList<Tag>(tags.length));
        for (Tag value : tags) {
            getTags().add(value);
        }
        return this;
    }

    /**
     * Replaces the list of tags with the supplied collection.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param tags A list of tags.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public CreateDBInstanceReadReplicaRequest withTags(java.util.Collection<Tag> tags) {
        if (tags == null) {
            this.tags = null;
        } else {
            com.amazonaws.internal.ListWithAutoConstructFlag<Tag> tagsCopy = new com.amazonaws.internal.ListWithAutoConstructFlag<Tag>(tags.size());
            tagsCopy.addAll(tags);
            this.tags = tagsCopy;
        }
        return this;
    }

    /**
     * Returns the DB subnet group for the DB instance.
     *
     * @return The DB subnet group name. The new DB instance will be created
     *         in the VPC associated with the DB subnet group; if none is
     *         specified, the new DB instance is not created in a VPC. See
     *         {@link #setDBSubnetGroupName(String)} for the constraints.
     */
    public String getDBSubnetGroupName() {
        return dBSubnetGroupName;
    }

    /**
     * Specifies a DB subnet group for the DB instance. The new DB instance
     * will be created in the VPC associated with the DB subnet group. If no
     * DB subnet group is specified, then the new DB instance is not created
     * in a VPC.
<p>Constraints: <ul> <li>Can only be specified if the source * DB instance identifier specifies a DB instance in another region.</li> * <li>The specified DB subnet group must be in the same region in which * the operation is running.</li> <li> All Read Replicas in one region * that are created from the same source DB instance must either: * <ul><li>Specify DB subnet groups from the same VPC. All these Read * Replicas will be created in the same VPC.</li><li>Not specify a DB * subnet group. All these Read Replicas will be created outside of any * VPC.</li></ul></li> </ul> * * @param dBSubnetGroupName Specifies a DB subnet group for the DB instance. The new DB instance * will be created in the VPC associated with the DB subnet group. If no * DB subnet group is specified, then the new DB instance is not created * in a VPC. <p>Constraints: <ul> <li>Can only be specified if the source * DB instance identifier specifies a DB instance in another region.</li> * <li>The specified DB subnet group must be in the same region in which * the operation is running.</li> <li> All Read Replicas in one region * that are created from the same source DB instance must either: * <ul><li>Specify DB subnet groups from the same VPC. All these Read * Replicas will be created in the same VPC.</li><li>Not specify a DB * subnet group. All these Read Replicas will be created outside of any * VPC.</li></ul></li> </ul> */ public void setDBSubnetGroupName(String dBSubnetGroupName) { this.dBSubnetGroupName = dBSubnetGroupName; } /** * Specifies a DB subnet group for the DB instance. The new DB instance * will be created in the VPC associated with the DB subnet group. If no * DB subnet group is specified, then the new DB instance is not created * in a VPC. 
<p>Constraints: <ul> <li>Can only be specified if the source * DB instance identifier specifies a DB instance in another region.</li> * <li>The specified DB subnet group must be in the same region in which * the operation is running.</li> <li> All Read Replicas in one region * that are created from the same source DB instance must either: * <ul><li>Specify DB subnet groups from the same VPC. All these Read * Replicas will be created in the same VPC.</li><li>Not specify a DB * subnet group. All these Read Replicas will be created outside of any * VPC.</li></ul></li> </ul> * <p> * Returns a reference to this object so that method calls can be chained together. * * @param dBSubnetGroupName Specifies a DB subnet group for the DB instance. The new DB instance * will be created in the VPC associated with the DB subnet group. If no * DB subnet group is specified, then the new DB instance is not created * in a VPC. <p>Constraints: <ul> <li>Can only be specified if the source * DB instance identifier specifies a DB instance in another region.</li> * <li>The specified DB subnet group must be in the same region in which * the operation is running.</li> <li> All Read Replicas in one region * that are created from the same source DB instance must either: * <ul><li>Specify DB subnet groups from the same VPC. All these Read * Replicas will be created in the same VPC.</li><li>Not specify a DB * subnet group. All these Read Replicas will be created outside of any * VPC.</li></ul></li> </ul> * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withDBSubnetGroupName(String dBSubnetGroupName) { this.dBSubnetGroupName = dBSubnetGroupName; return this; } /** * Specifies the storage type to be associated with the Read Replica. <p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. 
<p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> * * @return Specifies the storage type to be associated with the Read Replica. <p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. <p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> */ public String getStorageType() { return storageType; } /** * Specifies the storage type to be associated with the Read Replica. <p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. <p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> * * @param storageType Specifies the storage type to be associated with the Read Replica. <p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. <p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> */ public void setStorageType(String storageType) { this.storageType = storageType; } /** * Specifies the storage type to be associated with the Read Replica. <p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. <p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> * <p> * Returns a reference to this object so that method calls can be chained together. * * @param storageType Specifies the storage type to be associated with the Read Replica. 
<p> * Valid values: <code>standard | gp2 | io1</code> <p> If you specify * <code>io1</code>, you must also include a value for the * <code>Iops</code> parameter. <p> Default: <code>io1</code> if the * <code>Iops</code> parameter is specified; otherwise * <code>standard</code> * * @return A reference to this updated object so that method calls can be chained * together. */ public CreateDBInstanceReadReplicaRequest withStorageType(String storageType) { this.storageType = storageType; return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getDBInstanceIdentifier() != null) sb.append("DBInstanceIdentifier: " + getDBInstanceIdentifier() + ","); if (getSourceDBInstanceIdentifier() != null) sb.append("SourceDBInstanceIdentifier: " + getSourceDBInstanceIdentifier() + ","); if (getDBInstanceClass() != null) sb.append("DBInstanceClass: " + getDBInstanceClass() + ","); if (getAvailabilityZone() != null) sb.append("AvailabilityZone: " + getAvailabilityZone() + ","); if (getPort() != null) sb.append("Port: " + getPort() + ","); if (isAutoMinorVersionUpgrade() != null) sb.append("AutoMinorVersionUpgrade: " + isAutoMinorVersionUpgrade() + ","); if (getIops() != null) sb.append("Iops: " + getIops() + ","); if (getOptionGroupName() != null) sb.append("OptionGroupName: " + getOptionGroupName() + ","); if (isPubliclyAccessible() != null) sb.append("PubliclyAccessible: " + isPubliclyAccessible() + ","); if (getTags() != null) sb.append("Tags: " + getTags() + ","); if (getDBSubnetGroupName() != null) sb.append("DBSubnetGroupName: " + getDBSubnetGroupName() + ","); if (getStorageType() != null) sb.append("StorageType: " + getStorageType() ); sb.append("}"); return sb.toString(); } @Override public int hashCode() { final int prime = 31; int hashCode = 
1; hashCode = prime * hashCode + ((getDBInstanceIdentifier() == null) ? 0 : getDBInstanceIdentifier().hashCode()); hashCode = prime * hashCode + ((getSourceDBInstanceIdentifier() == null) ? 0 : getSourceDBInstanceIdentifier().hashCode()); hashCode = prime * hashCode + ((getDBInstanceClass() == null) ? 0 : getDBInstanceClass().hashCode()); hashCode = prime * hashCode + ((getAvailabilityZone() == null) ? 0 : getAvailabilityZone().hashCode()); hashCode = prime * hashCode + ((getPort() == null) ? 0 : getPort().hashCode()); hashCode = prime * hashCode + ((isAutoMinorVersionUpgrade() == null) ? 0 : isAutoMinorVersionUpgrade().hashCode()); hashCode = prime * hashCode + ((getIops() == null) ? 0 : getIops().hashCode()); hashCode = prime * hashCode + ((getOptionGroupName() == null) ? 0 : getOptionGroupName().hashCode()); hashCode = prime * hashCode + ((isPubliclyAccessible() == null) ? 0 : isPubliclyAccessible().hashCode()); hashCode = prime * hashCode + ((getTags() == null) ? 0 : getTags().hashCode()); hashCode = prime * hashCode + ((getDBSubnetGroupName() == null) ? 0 : getDBSubnetGroupName().hashCode()); hashCode = prime * hashCode + ((getStorageType() == null) ? 
0 : getStorageType().hashCode()); return hashCode; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof CreateDBInstanceReadReplicaRequest == false) return false; CreateDBInstanceReadReplicaRequest other = (CreateDBInstanceReadReplicaRequest)obj; if (other.getDBInstanceIdentifier() == null ^ this.getDBInstanceIdentifier() == null) return false; if (other.getDBInstanceIdentifier() != null && other.getDBInstanceIdentifier().equals(this.getDBInstanceIdentifier()) == false) return false; if (other.getSourceDBInstanceIdentifier() == null ^ this.getSourceDBInstanceIdentifier() == null) return false; if (other.getSourceDBInstanceIdentifier() != null && other.getSourceDBInstanceIdentifier().equals(this.getSourceDBInstanceIdentifier()) == false) return false; if (other.getDBInstanceClass() == null ^ this.getDBInstanceClass() == null) return false; if (other.getDBInstanceClass() != null && other.getDBInstanceClass().equals(this.getDBInstanceClass()) == false) return false; if (other.getAvailabilityZone() == null ^ this.getAvailabilityZone() == null) return false; if (other.getAvailabilityZone() != null && other.getAvailabilityZone().equals(this.getAvailabilityZone()) == false) return false; if (other.getPort() == null ^ this.getPort() == null) return false; if (other.getPort() != null && other.getPort().equals(this.getPort()) == false) return false; if (other.isAutoMinorVersionUpgrade() == null ^ this.isAutoMinorVersionUpgrade() == null) return false; if (other.isAutoMinorVersionUpgrade() != null && other.isAutoMinorVersionUpgrade().equals(this.isAutoMinorVersionUpgrade()) == false) return false; if (other.getIops() == null ^ this.getIops() == null) return false; if (other.getIops() != null && other.getIops().equals(this.getIops()) == false) return false; if (other.getOptionGroupName() == null ^ this.getOptionGroupName() == null) return false; if (other.getOptionGroupName() != null && 
other.getOptionGroupName().equals(this.getOptionGroupName()) == false) return false; if (other.isPubliclyAccessible() == null ^ this.isPubliclyAccessible() == null) return false; if (other.isPubliclyAccessible() != null && other.isPubliclyAccessible().equals(this.isPubliclyAccessible()) == false) return false; if (other.getTags() == null ^ this.getTags() == null) return false; if (other.getTags() != null && other.getTags().equals(this.getTags()) == false) return false; if (other.getDBSubnetGroupName() == null ^ this.getDBSubnetGroupName() == null) return false; if (other.getDBSubnetGroupName() != null && other.getDBSubnetGroupName().equals(this.getDBSubnetGroupName()) == false) return false; if (other.getStorageType() == null ^ this.getStorageType() == null) return false; if (other.getStorageType() != null && other.getStorageType().equals(this.getStorageType()) == false) return false; return true; } @Override public CreateDBInstanceReadReplicaRequest clone() { return (CreateDBInstanceReadReplicaRequest) super.clone(); } }
apache-2.0
cymcsg/UltimateAndroid
deprecated/UltimateAndroidNormal/UltimateAndroid/src/com/marshalchen/common/usefulModule/standuptimer/dao/DuplicateTeamException.java
309
/* * Copyright (c) 2014. Marshal Chen. */ package com.marshalchen.common.usefulModule.standuptimer.dao; public class DuplicateTeamException extends RuntimeException { private static final long serialVersionUID = 1L; public DuplicateTeamException(String message) { super(message); } }
apache-2.0
jiangyubao/moquette-mqtt
parser_commons/src/main/java/org/dna/mqtt/moquette/proto/messages/UnsubAckMessage.java
825
/* * Copyright (c) 2012-2014 The original author or authors * ------------------------------------------------------ * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * and Apache License v2.0 which accompanies this distribution. * * The Eclipse Public License is available at * http://www.eclipse.org/legal/epl-v10.html * * The Apache License v2.0 is available at * http://www.opensource.org/licenses/apache2.0.php * * You may elect to redistribute this code under either of these licenses. */ package org.dna.mqtt.moquette.proto.messages; /** * * @author andrea */ public class UnsubAckMessage extends MessageIDMessage { public UnsubAckMessage() { m_messageType = AbstractMessage.UNSUBACK; } }
apache-2.0
rokn/Count_Words_2015
testing/drools-master/drools-beliefs/src/main/java/org/drools/beliefs/bayes/Marginalizer.java
1282
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.beliefs.bayes; public class Marginalizer { public Marginalizer(BayesVariable[] srcVars, double[] srcPotentials, BayesVariable var, double[] varDistribution) { BayesVariable[] trgVars = new BayesVariable[] { var }; int[] trgVarPos = PotentialMultiplier.createSubsetVarPos(srcVars, trgVars); int trgVarNumberOfStates = PotentialMultiplier.createNumberOfStates(trgVars); int[] trgVarMultipliers = PotentialMultiplier.createIndexMultipliers(trgVars, trgVarNumberOfStates); BayesProjection p = new BayesProjection(srcVars, srcPotentials, trgVarPos, trgVarMultipliers, varDistribution); p.project(); } }
mit
rokn/Count_Words_2015
testing/drools-master/drools-examples-cdi/cdi-example/src/main/java/org/drools/example/cdi/cdiexample/Message.java
2000
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.example.cdi.cdiexample; public class Message { private String name; private String text; public Message(String name, String text) { this.text = text; this.name = name; } public String getText() { return text; } public void setText(String text) { this.text = text; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String toString() { return "Message[name='" + name + "' text='" + text + "'"; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((name == null) ? 0 : name.hashCode()); result = prime * result + ((text == null) ? 0 : text.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } Message other = (Message) obj; if (name == null) { if (other.name != null) { return false; } } else if (!name.equals(other.name)) { return false; } if (text == null) { if (other.text != null) { return false; } } else if (!text.equals(other.text)) { return false; } return true; } }
mit
skhalifa/QDrill
drill1.2/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
8019
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.drill.exec.store.hive.schema; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.calcite.schema.SchemaPlus; import org.apache.drill.common.exceptions.ExecutionSetupException; import org.apache.drill.exec.ExecConstants; import org.apache.drill.exec.planner.logical.DrillTable; import org.apache.drill.exec.store.AbstractSchema; import org.apache.drill.exec.store.SchemaConfig; import org.apache.drill.exec.store.SchemaFactory; import org.apache.drill.exec.store.hive.DrillHiveMetaStoreClient; import org.apache.drill.exec.store.hive.HiveReadEntry; import org.apache.drill.exec.store.hive.HiveStoragePlugin; import org.apache.drill.exec.store.hive.HiveStoragePluginConfig; import org.apache.drill.exec.util.ImpersonationUtil; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.thrift.TException; import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; public class HiveSchemaFactory implements SchemaFactory { static final org.slf4j.Logger logger = 
org.slf4j.LoggerFactory.getLogger(HiveSchemaFactory.class); // MetaStoreClient created using process user credentials private final DrillHiveMetaStoreClient processUserMetastoreClient; private final HiveStoragePlugin plugin; private final Map<String, String> hiveConfigOverride; private final String schemaName; private final HiveConf hiveConf; private final boolean isDrillImpersonationEnabled; private final boolean isHS2DoAsSet; public HiveSchemaFactory(HiveStoragePlugin plugin, String name, Map<String, String> hiveConfigOverride) throws ExecutionSetupException { this.schemaName = name; this.plugin = plugin; this.hiveConfigOverride = hiveConfigOverride; hiveConf = new HiveConf(); if (hiveConfigOverride != null) { for (Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) { final String property = entry.getKey(); final String value = entry.getValue(); hiveConf.set(property, value); logger.trace("HiveConfig Override {}={}", property, value); } } isHS2DoAsSet = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS); isDrillImpersonationEnabled = plugin.getContext().getConfig().getBoolean(ExecConstants.IMPERSONATION_ENABLED); try { processUserMetastoreClient = DrillHiveMetaStoreClient.createNonCloseableClientWithCaching(hiveConf, hiveConfigOverride); } catch (MetaException e) { throw new ExecutionSetupException("Failure setting up Hive metastore client.", e); } } /** * Does Drill needs to impersonate as user connected to Drill when reading data from Hive warehouse location? * @return True when both Drill impersonation and Hive impersonation are enabled. 
*/ private boolean needToImpersonateReadingData() { return isDrillImpersonationEnabled && isHS2DoAsSet; } @Override public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException { DrillHiveMetaStoreClient mClientForSchemaTree = processUserMetastoreClient; if (isDrillImpersonationEnabled) { try { mClientForSchemaTree = DrillHiveMetaStoreClient.createClientWithAuthz(processUserMetastoreClient, hiveConf, hiveConfigOverride, schemaConfig.getUserName(), schemaConfig.getIgnoreAuthErrors()); } catch (final TException e) { throw new IOException("Failure setting up Hive metastore client.", e); } } HiveSchema schema = new HiveSchema(schemaConfig, mClientForSchemaTree, schemaName); SchemaPlus hPlus = parent.add(schemaName, schema); schema.setHolder(hPlus); } class HiveSchema extends AbstractSchema { private final SchemaConfig schemaConfig; private final DrillHiveMetaStoreClient mClient; private HiveDatabaseSchema defaultSchema; public HiveSchema(final SchemaConfig schemaConfig, final DrillHiveMetaStoreClient mClient, final String name) { super(ImmutableList.<String>of(), name); this.schemaConfig = schemaConfig; this.mClient = mClient; getSubSchema("default"); } @Override public AbstractSchema getSubSchema(String name) { List<String> tables; try { List<String> dbs = mClient.getDatabases(); if (!dbs.contains(name)) { logger.debug("Database '{}' doesn't exists in Hive storage '{}'", name, schemaName); return null; } tables = mClient.getTableNames(name); HiveDatabaseSchema schema = new HiveDatabaseSchema(tables, this, name); if (name.equals("default")) { this.defaultSchema = schema; } return schema; } catch (final TException e) { logger.warn("Failure while attempting to access HiveDatabase '{}'.", name, e.getCause()); return null; } } void setHolder(SchemaPlus plusOfThis) { for (String s : getSubSchemaNames()) { plusOfThis.add(s, getSubSchema(s)); } } @Override public boolean showInInformationSchema() { return false; } @Override public Set<String> 
getSubSchemaNames() { try { List<String> dbs = mClient.getDatabases(); return Sets.newHashSet(dbs); } catch (final TException e) { logger.warn("Failure while getting Hive database list.", e); } return super.getSubSchemaNames(); } @Override public org.apache.calcite.schema.Table getTable(String name) { if (defaultSchema == null) { return super.getTable(name); } return defaultSchema.getTable(name); } @Override public Set<String> getTableNames() { if (defaultSchema == null) { return super.getTableNames(); } return defaultSchema.getTableNames(); } DrillTable getDrillTable(String dbName, String t) { HiveReadEntry entry = getSelectionBaseOnName(dbName, t); if (entry == null) { return null; } final String userToImpersonate = needToImpersonateReadingData() ? schemaConfig.getUserName() : ImpersonationUtil.getProcessUserName(); if (entry.getJdbcTableType() == TableType.VIEW) { return new DrillHiveViewTable(schemaName, plugin, userToImpersonate, entry); } else { return new DrillHiveTable(schemaName, plugin, userToImpersonate, entry); } } HiveReadEntry getSelectionBaseOnName(String dbName, String t) { if (dbName == null) { dbName = "default"; } try{ return mClient.getHiveReadEntry(dbName, t); }catch(final TException e) { logger.warn("Exception occurred while trying to read table. {}.{}", dbName, t, e.getCause()); return null; } } @Override public AbstractSchema getDefaultSchema() { return defaultSchema; } @Override public String getTypeName() { return HiveStoragePluginConfig.NAME; } @Override public void close() throws Exception { if (mClient != null) { mClient.close(); } } } }
apache-2.0
yubaokang/android-menudrawer
menudrawer-samples/src/net/simonvt/menudrawer/samples/WindowSample.java
3456
package net.simonvt.menudrawer.samples; import net.simonvt.menudrawer.MenuDrawer; import net.simonvt.menudrawer.Position; import android.app.Activity; import android.os.Build; import android.os.Bundle; import android.view.MenuItem; import android.view.View; import android.widget.TextView; public class WindowSample extends Activity implements View.OnClickListener { private static final String STATE_MENUDRAWER = "net.simonvt.menudrawer.samples.WindowSample.menuDrawer"; private static final String STATE_ACTIVE_VIEW_ID = "net.simonvt.menudrawer.samples.WindowSample.activeViewId"; private MenuDrawer mMenuDrawer; private TextView mContentTextView; private int mActiveViewId; @Override public void onCreate(Bundle inState) { super.onCreate(inState); if (inState != null) { mActiveViewId = inState.getInt(STATE_ACTIVE_VIEW_ID); } mMenuDrawer = MenuDrawer.attach(this, MenuDrawer.Type.BEHIND, Position.LEFT, MenuDrawer.MENU_DRAG_WINDOW); mMenuDrawer.setContentView(R.layout.activity_windowsample); mMenuDrawer.setMenuView(R.layout.menu_scrollview); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) { getActionBar().setDisplayHomeAsUpEnabled(true); } mContentTextView = (TextView) findViewById(R.id.contentText); findViewById(R.id.item1).setOnClickListener(this); findViewById(R.id.item2).setOnClickListener(this); findViewById(R.id.item3).setOnClickListener(this); findViewById(R.id.item4).setOnClickListener(this); findViewById(R.id.item5).setOnClickListener(this); findViewById(R.id.item6).setOnClickListener(this); TextView activeView = (TextView) findViewById(mActiveViewId); if (activeView != null) { mMenuDrawer.setActiveView(activeView); mContentTextView.setText("Active item: " + activeView.getText()); } // This will animate the drawer open and closed until the user manually drags it. Usually this would only be // called on first launch. 
mMenuDrawer.peekDrawer(); } @Override protected void onRestoreInstanceState(Bundle inState) { super.onRestoreInstanceState(inState); mMenuDrawer.restoreState(inState.getParcelable(STATE_MENUDRAWER)); } @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); outState.putParcelable(STATE_MENUDRAWER, mMenuDrawer.saveState()); outState.putInt(STATE_ACTIVE_VIEW_ID, mActiveViewId); } @Override public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case android.R.id.home: mMenuDrawer.toggleMenu(); return true; } return super.onOptionsItemSelected(item); } @Override public void onBackPressed() { final int drawerState = mMenuDrawer.getDrawerState(); if (drawerState == MenuDrawer.STATE_OPEN || drawerState == MenuDrawer.STATE_OPENING) { mMenuDrawer.closeMenu(); return; } super.onBackPressed(); } @Override public void onClick(View v) { mMenuDrawer.setActiveView(v); mContentTextView.setText("Active item: " + ((TextView) v).getText()); mMenuDrawer.closeMenu(); mActiveViewId = v.getId(); } }
apache-2.0
JingchengDu/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/package-info.java
897
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** Router policies. **/ package org.apache.hadoop.yarn.server.federation.policies.router;
apache-2.0
sheofir/aws-sdk-java
aws-java-sdk-kinesis/src/main/java/com/amazonaws/services/kinesis/model/transform/PutRecordRequestMarshaller.java
3711
/* * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.model.transform; import static com.amazonaws.util.StringUtils.UTF8; import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.OutputStreamWriter; import java.io.StringWriter; import java.io.Writer; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.List; import java.util.regex.Pattern; import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.DefaultRequest; import com.amazonaws.http.HttpMethodName; import com.amazonaws.services.kinesis.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.util.BinaryUtils; import com.amazonaws.util.StringUtils; import com.amazonaws.util.StringInputStream; import com.amazonaws.util.json.*; /** * Put Record Request Marshaller */ public class PutRecordRequestMarshaller implements Marshaller<Request<PutRecordRequest>, PutRecordRequest> { public Request<PutRecordRequest> marshall(PutRecordRequest putRecordRequest) { if (putRecordRequest == null) { throw new AmazonClientException("Invalid argument passed to marshall(...)"); } Request<PutRecordRequest> request = new DefaultRequest<PutRecordRequest>(putRecordRequest, "AmazonKinesis"); String target = "Kinesis_20131202.PutRecord"; request.addHeader("X-Amz-Target", target); 
request.setHttpMethod(HttpMethodName.POST); request.setResourcePath(""); try { StringWriter stringWriter = new StringWriter(); JSONWriter jsonWriter = new JSONWriter(stringWriter); jsonWriter.object(); if (putRecordRequest.getStreamName() != null) { jsonWriter.key("StreamName").value(putRecordRequest.getStreamName()); } if (putRecordRequest.getData() != null) { jsonWriter.key("Data").value(putRecordRequest.getData()); } if (putRecordRequest.getPartitionKey() != null) { jsonWriter.key("PartitionKey").value(putRecordRequest.getPartitionKey()); } if (putRecordRequest.getExplicitHashKey() != null) { jsonWriter.key("ExplicitHashKey").value(putRecordRequest.getExplicitHashKey()); } if (putRecordRequest.getSequenceNumberForOrdering() != null) { jsonWriter.key("SequenceNumberForOrdering").value(putRecordRequest.getSequenceNumberForOrdering()); } jsonWriter.endObject(); String snippet = stringWriter.toString(); byte[] content = snippet.getBytes(UTF8); request.setContent(new StringInputStream(snippet)); request.addHeader("Content-Length", Integer.toString(content.length)); request.addHeader("Content-Type", "application/x-amz-json-1.1"); } catch(Throwable t) { throw new AmazonClientException("Unable to marshall request to JSON: " + t.getMessage(), t); } return request; } }
apache-2.0
Etiene/Algorithm-Implementations
Counting_Sort/Java/rrivera1849/Counting_Sort_test.java
539
import static org.junit.Assert.*; import org.junit.Assert; import org.junit.Test; import java.util.Arrays; public class Counting_Sort_test extends Counting_Sort { @Test public void test() { int[] arrayToSort = {9,9,3,3,2,1,5,4,4,7,7}; int[] expectedArray = {1,2,3,3,4,4,5,7,7,9,9}; int[] sortedArray = countingSort(arrayToSort,9); String errorMsg = String.format("Error- got %s, expected %s" ,Arrays.toString(expectedArray),Arrays.toString(sortedArray)); Assert.assertArrayEquals(expectedArray,sortedArray); } }
mit
antoaravinth/incubator-groovy
src/main/org/codehaus/groovy/classgen/ReturnAdder.java
11195
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.codehaus.groovy.classgen; import org.codehaus.groovy.ast.ClassHelper; import org.codehaus.groovy.ast.MethodNode; import org.codehaus.groovy.ast.VariableScope; import org.codehaus.groovy.ast.expr.ConstantExpression; import org.codehaus.groovy.ast.expr.Expression; import org.codehaus.groovy.ast.stmt.*; import java.util.List; import java.util.ArrayList; /** * Utility class to add return statements. * Extracted from Verifier as it can be useful for some AST transformations */ public class ReturnAdder { private static final ReturnStatementListener DEFAULT_LISTENER = new ReturnStatementListener() { public void returnStatementAdded(final ReturnStatement returnStatement) { } }; /** * If set to 'true', then returns are effectively added. This is useful whenever you just want * to check what returns are produced without eventually adding them. */ private final boolean doAdd; private final ReturnStatementListener listener; public ReturnAdder() { doAdd = true; listener = DEFAULT_LISTENER; } public ReturnAdder(ReturnStatementListener listener) { this.listener = listener; this.doAdd = false; } /** * Adds return statements in method code whenever an implicit return is detected. 
* @param node the method node where to add return statements * @deprecated Use {@link #visitMethod(org.codehaus.groovy.ast.MethodNode)} instead */ public static void addReturnIfNeeded(MethodNode node) { ReturnAdder adder = new ReturnAdder(); adder.visitMethod(node); } public void visitMethod(MethodNode node) { Statement statement = node.getCode(); if (!node.isVoidMethod()) { if (statement != null) // it happens with @interface methods { final Statement code = addReturnsIfNeeded(statement, node.getVariableScope()); if (doAdd) node.setCode(code); } } else if (!node.isAbstract() && node.getReturnType().redirect()!=ClassHelper.VOID_TYPE) { if (!(statement instanceof BytecodeSequence)) { BlockStatement newBlock = new BlockStatement(); Statement code = node.getCode(); if (code instanceof BlockStatement) { newBlock.setVariableScope(((BlockStatement) code).getVariableScope()); } if (statement instanceof BlockStatement) { newBlock.addStatements(((BlockStatement)statement).getStatements()); } else { newBlock.addStatement(statement); } final ReturnStatement returnStatement = ReturnStatement.RETURN_NULL_OR_VOID; listener.returnStatementAdded(returnStatement); newBlock.addStatement(returnStatement); newBlock.setSourcePosition(statement); if (doAdd) node.setCode(newBlock); } } } private Statement addReturnsIfNeeded(Statement statement, VariableScope scope) { if ( statement instanceof ReturnStatement || statement instanceof BytecodeSequence || statement instanceof ThrowStatement) { return statement; } if (statement instanceof EmptyStatement) { final ReturnStatement returnStatement = new ReturnStatement(ConstantExpression.NULL); listener.returnStatementAdded(returnStatement); return returnStatement; } if (statement instanceof ExpressionStatement) { ExpressionStatement expStmt = (ExpressionStatement) statement; Expression expr = expStmt.getExpression(); ReturnStatement ret = new ReturnStatement(expr); ret.setSourcePosition(expr); 
ret.setStatementLabel(statement.getStatementLabel()); listener.returnStatementAdded(ret); return ret; } if (statement instanceof SynchronizedStatement) { SynchronizedStatement sync = (SynchronizedStatement) statement; final Statement code = addReturnsIfNeeded(sync.getCode(), scope); if (doAdd) sync.setCode(code); return sync; } if (statement instanceof IfStatement) { IfStatement ifs = (IfStatement) statement; final Statement ifBlock = addReturnsIfNeeded(ifs.getIfBlock(), scope); final Statement elseBlock = addReturnsIfNeeded(ifs.getElseBlock(), scope); if (doAdd) { ifs.setIfBlock(ifBlock); ifs.setElseBlock(elseBlock); } return ifs; } if (statement instanceof SwitchStatement) { SwitchStatement swi = (SwitchStatement) statement; for (CaseStatement caseStatement : swi.getCaseStatements()) { final Statement code = adjustSwitchCaseCode(caseStatement.getCode(), scope, false); if (doAdd) caseStatement.setCode(code); } final Statement defaultStatement = adjustSwitchCaseCode(swi.getDefaultStatement(), scope, true); if (doAdd) swi.setDefaultStatement(defaultStatement); return swi; } if (statement instanceof TryCatchStatement) { TryCatchStatement trys = (TryCatchStatement) statement; final boolean[] missesReturn = new boolean[1]; new ReturnAdder(new ReturnStatementListener() { @Override public void returnStatementAdded(ReturnStatement returnStatement) { missesReturn[0] = true; } }).addReturnsIfNeeded(trys.getFinallyStatement(), scope); boolean hasFinally = !(trys.getFinallyStatement() instanceof EmptyStatement); // if there is no missing return in the finally block and the block exists // there is nothing to do if (hasFinally && !missesReturn[0]) return trys; // add returns to try and catch blocks final Statement tryStatement = addReturnsIfNeeded(trys.getTryStatement(), scope); if (doAdd) trys.setTryStatement(tryStatement); final int len = trys.getCatchStatements().size(); for (int i = 0; i != len; ++i) { final CatchStatement catchStatement = trys.getCatchStatement(i); final 
Statement code = addReturnsIfNeeded(catchStatement.getCode(), scope); if (doAdd) catchStatement.setCode(code); } return trys; } if (statement instanceof BlockStatement) { BlockStatement block = (BlockStatement) statement; final List list = block.getStatements(); if (!list.isEmpty()) { int idx = list.size() - 1; Statement last = addReturnsIfNeeded((Statement) list.get(idx), block.getVariableScope()); if (doAdd) list.set(idx, last); if (!statementReturns(last)) { final ReturnStatement returnStatement = new ReturnStatement(ConstantExpression.NULL); listener.returnStatementAdded(returnStatement); if (doAdd) list.add(returnStatement); } } else { ReturnStatement ret = new ReturnStatement(ConstantExpression.NULL); ret.setSourcePosition(block); listener.returnStatementAdded(ret); return ret; } BlockStatement newBlock = new BlockStatement(list, block.getVariableScope()); newBlock.setSourcePosition(block); return newBlock; } if (statement == null) { final ReturnStatement returnStatement = new ReturnStatement(ConstantExpression.NULL); listener.returnStatementAdded(returnStatement); return returnStatement; } else { final List list = new ArrayList(); list.add(statement); final ReturnStatement returnStatement = new ReturnStatement(ConstantExpression.NULL); listener.returnStatementAdded(returnStatement); list.add(returnStatement); BlockStatement newBlock = new BlockStatement(list, new VariableScope(scope)); newBlock.setSourcePosition(statement); return newBlock; } } private Statement adjustSwitchCaseCode(Statement statement, VariableScope scope, boolean defaultCase) { if(statement instanceof BlockStatement) { final List list = ((BlockStatement)statement).getStatements(); if (!list.isEmpty()) { int idx = list.size() - 1; Statement last = (Statement) list.get(idx); if(last instanceof BreakStatement) { if (doAdd) { list.remove(idx); return addReturnsIfNeeded(statement, scope); } else { BlockStatement newStmt = new BlockStatement(); for (int i=0;i<idx; i++) { 
newStmt.addStatement((Statement) list.get(i)); } return addReturnsIfNeeded(newStmt, scope); } } else if(defaultCase) { return addReturnsIfNeeded(statement, scope); } } } return statement; } private static boolean statementReturns(Statement last) { return ( last instanceof ReturnStatement || last instanceof BlockStatement || last instanceof IfStatement || last instanceof ExpressionStatement || last instanceof EmptyStatement || last instanceof TryCatchStatement || last instanceof BytecodeSequence || last instanceof ThrowStatement || last instanceof SynchronizedStatement ); } /** * Implement this method in order to be notified whenever a return statement is generated. */ public interface ReturnStatementListener { void returnStatementAdded(ReturnStatement returnStatement); } }
apache-2.0
gfyoung/elasticsearch
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactory.java
1464
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ package org.elasticsearch.xpack.watcher.actions.jira; import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; import org.elasticsearch.xpack.watcher.notification.jira.JiraService; import java.io.IOException; public class JiraActionFactory extends ActionFactory { private final TextTemplateEngine templateEngine; private final JiraService jiraService; public JiraActionFactory(TextTemplateEngine templateEngine, JiraService jiraService) { super(LogManager.getLogger(ExecutableJiraAction.class)); this.templateEngine = templateEngine; this.jiraService = jiraService; } @Override public ExecutableJiraAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { JiraAction action = JiraAction.parse(watchId, actionId, parser); jiraService.getAccount(action.getAccount()); // for validation -- throws exception if account not present return new ExecutableJiraAction(action, actionLogger, jiraService, templateEngine); } }
apache-2.0
izerui/lemon
src/main/java/com/mossle/bpm/persistence/domain/BpmTaskConf.java
2165
package com.mossle.bpm.persistence.domain; // Generated by Hibernate Tools import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.Id; import javax.persistence.Table; /** * BpmTaskConf . * * @author Lingo */ @Entity @Table(name = "BPM_TASK_CONF") public class BpmTaskConf implements java.io.Serializable { private static final long serialVersionUID = 0L; /** null. */ private Long id; /** null. */ private String businessKey; /** null. */ private String taskDefinitionKey; /** null. */ private String assignee; public BpmTaskConf() { } public BpmTaskConf(String businessKey, String taskDefinitionKey, String assignee) { this.businessKey = businessKey; this.taskDefinitionKey = taskDefinitionKey; this.assignee = assignee; } /** @return null. */ @Id @GeneratedValue @Column(name = "ID", unique = true, nullable = false) public Long getId() { return this.id; } /** * @param id * null. */ public void setId(Long id) { this.id = id; } /** @return null. */ @Column(name = "BUSINESS_KEY", length = 200) public String getBusinessKey() { return this.businessKey; } /** * @param businessKey * null. */ public void setBusinessKey(String businessKey) { this.businessKey = businessKey; } /** @return null. */ @Column(name = "TASK_DEFINITION_KEY", length = 200) public String getTaskDefinitionKey() { return this.taskDefinitionKey; } /** * @param taskDefinitionKey * null. */ public void setTaskDefinitionKey(String taskDefinitionKey) { this.taskDefinitionKey = taskDefinitionKey; } /** @return null. */ @Column(name = "ASSIGNEE", length = 200) public String getAssignee() { return this.assignee; } /** * @param assignee * null. */ public void setAssignee(String assignee) { this.assignee = assignee; } }
apache-2.0
rokn/Count_Words_2015
testing/openjdk2/jaxws/src/share/jaxws_classes/com/sun/xml/internal/rngom/binary/BinaryPattern.java
4309
/* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * Copyright (C) 2004-2011 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package com.sun.xml.internal.rngom.binary; import org.xml.sax.SAXException; import java.util.Collection; import java.util.List; import java.util.ArrayList; public abstract class BinaryPattern extends Pattern { protected final Pattern p1; protected final Pattern p2; BinaryPattern(boolean nullable, int hc, Pattern p1, Pattern p2) { super(nullable, Math.max(p1.getContentType(), p2.getContentType()), hc); this.p1 = p1; this.p2 = p2; } void checkRecursion(int depth) throws SAXException { p1.checkRecursion(depth); p2.checkRecursion(depth); } void checkRestrictions(int context, DuplicateAttributeDetector dad, Alphabet alpha) throws RestrictionViolationException { p1.checkRestrictions(context, dad, alpha); p2.checkRestrictions(context, dad, alpha); } boolean samePattern(Pattern other) { if (getClass() != other.getClass()) return false; BinaryPattern b = (BinaryPattern)other; return p1 == b.p1 && p2 == b.p2; } public final Pattern getOperand1() { return p1; } public final Pattern getOperand2() { return p2; } /** * Adds all the children of this pattern to the given collection. * * <p> * For example, if this pattern is (A|B|C), it adds A, B, and C * to the collection, even though internally it's represented * as (A|(B|C)). */ public final void fillChildren( Collection col ) { fillChildren(getClass(),p1,col); fillChildren(getClass(),p2,col); } /** * Same as {@link #fillChildren(Collection)} but returns an array. 
*/ public final Pattern[] getChildren() { List lst = new ArrayList(); fillChildren(lst); return (Pattern[]) lst.toArray(new Pattern[lst.size()]); } private void fillChildren( Class c, Pattern p, Collection col ) { if(p.getClass()==c) { BinaryPattern bp = (BinaryPattern)p; bp.fillChildren(c,bp.p1,col); bp.fillChildren(c,bp.p2,col); } else { col.add(p); } } }
mit
rokn/Count_Words_2015
testing/openjdk2/jaxws/src/share/jaxws_classes/com/sun/xml/internal/rngom/ast/om/Location.java
2396
/* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * Copyright (C) 2004-2011 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package com.sun.xml.internal.rngom.ast.om; public interface Location { }
mit
apratkin/pentaho-kettle
engine/src/org/pentaho/di/job/entries/ftp/MVSFileParser.java
19792
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.job.entries.ftp; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Locale; import java.util.StringTokenizer; import org.pentaho.di.core.logging.LogChannelInterface; import org.pentaho.di.i18n.BaseMessages; import com.enterprisedt.net.ftp.FTPFile; import com.enterprisedt.net.ftp.FTPFileParser; /** * MVS Folder Listing Parser The purpose of this parser is to be able handle responses from an MVS z/OS mainframe FTP * server. * * Many places on the 'net were consulted for input to this parser. Importantly, this information from * com.os.os2.networking.tcp-ip group: * * http://groups.google.com/group/comp.os.os2.networking.tcp-ip/msg/25acc89563f1e93e * http://groups.google.com/group/comp. 
* os.os2.networking.tcp-ip/browse_frm/thread/11af1ba1bc6b0edd?hl=en&lr&ie=UTF-8&oe=UTF * -8&rnum=6&prev=/groups?q%3DMVS%2BPartitioned * %2Bdata%2Bset%2Bdirectory%26hl%3Den%26lr%3D%26ie%3DUTF-8%26oe%3DUTF-8%26selm * %3D4e7k0p%2524t1v%2540blackice.winternet.com%26rnum%3D6&pli=1 * http://publibz.boulder.ibm.com/cgi-bin/bookmgr_OS390/BOOKS/F1AA2032/1.5.15?SHELF=&DT=20001127174124 * * Implementation Details 1- This supports folders and partitioned data sets only. This does not support JCL or HFS 2- * You must treat partitioned data sets (Dsorg PO/PO-E) like folders and CD to them 3- Dsorg=PS is a downloadable file * as are all the contents of a Partitioned Data Set. 4- When downloading from a folder, the Recfm must start with V or * F. * * Note - the location for this is completely up for debate. I modeled this after the ftpsget/FTPSConnection and how * ftpsput reaches up and into the ftpsget package to get it. However, I think a better solution is to have an * entry/common. James and I agreed (in Matt's absense) to model the behavior after something already existing rather * than introduce a new folder (like entry/common or entry/util). * * @author mbatchelor September 2010 * */ public class MVSFileParser extends FTPFileParser { private static Class<?> PKG = MVSFileParser.class; // for i18n purposes, needed by Translator2!! 
/*** DO NOT TRANSLATE THESE ***/ private static final String PARSER_KEY = "MVS"; private static final String HEADER_VOLUME = "Volume"; private static final String HEADER_NAME = "Name"; private static final String LINE_TYPE_ARCIVE = "ARCIVE"; // *** NOT MISSPELLED *** private static final String ENTRY_FILE_TYPE = "PS"; private static final String LINE_TYPE_MIGRATED = "Migrated"; /*** ^^^ DO NOT TRANSLATE THESE ^^^ ***/ private static final int FOLDER_HEADER_TYPE_IDX = 0; private static final int FOLDER_LISTING_LENGTH_NORMAL = 10; private static final int FOLDER_LISTING_LENGTH_ARCIVE = 8; private String dateFormatString; // String used to parse file dates private String alternateFormatString; // Alternate form of date string in case month/day are switched private SimpleDateFormat dateFormat; // The DateFormat object to parse dates with private SimpleDateFormat dateTimeFormat; // The DateFormat object to parse "last modified" date+time with. private boolean partitionedDataset = false; // If true, It's a partitioned data set listing private LogChannelInterface log; public MVSFileParser( LogChannelInterface log ) { this.log = log; } /************************ Abstract Class Implementations *************************/ /* * * This method decides whether this parser can handle this directory listing * * Directory listing format ------------------------ Volume Unit Referred Ext Used Recfm Lrecl BlkSz Dsorg Dsname * BALP4B 3390 2010/09/09 6 57 FB 80 800 PO BMS BALP8E 3390 2010/09/07 1 2 FB 80 800 PO BMS.BACKUP ARCIVE Not Direct * Access Device KJ.IOP998.ERROR.PL.UNITTEST USS018 3308 2010/01/15 1 15 VB 259 8000 PS NFS.DOC Migrated * OAQPS.INTERIM.CNTYIM.V1.DATA * * Partitioned Dataset listing format: ----------------------------------- Name VV.MM Created Changed Size Init Mod Id * A 01.03 2007/10/22 2009/05/27 20:18 30 3 0 TR6JAM AAA 01.01 2007/06/01 2009/01/27 03:50 183 11 0 TR6AAJ AAJSUSU * 01.00 2005/08/29 2005/08/29 15:11 20 20 0 TR6MGM ADERESSO 01.01 2007/03/15 2007/03/15 
16:38 45 45 0 TR6CCU * * * Note: Date Format needs to be deciphered since for other sites it looks like this: BALP4B 3390 09/12/95 6 57 FB 80 * 800 PO BMS */ @Override public boolean isValidFormat( String[] listing ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Checking.Parser" ) ); } if ( listing.length > 0 ) { String[] header = splitMVSLine( listing[0] ); // first line of MVS listings is a header if ( ( header.length == FOLDER_LISTING_LENGTH_NORMAL ) || ( header.length == FOLDER_LISTING_LENGTH_ARCIVE ) ) { if ( header[FOLDER_HEADER_TYPE_IDX].equals( HEADER_VOLUME ) ) { this.partitionedDataset = false; // This is a directory listing, not PDS listing if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.INFO.Detected.Dir" ) ); } return isValidDirectoryFormat( listing ); } else if ( header[FOLDER_HEADER_TYPE_IDX].equals( HEADER_NAME ) ) { this.partitionedDataset = true; // Suspect PDS listing. if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.INFO.Detected.PDS" ) ); } return isValidPDSFormat( listing ); } } } return false; } /** * This parses an individual line from the directory listing. * */ @Override public FTPFile parse( String raw ) throws ParseException { String[] aLine = splitMVSLine( raw ); FTPFile rtn = null; if ( this.partitionedDataset ) { rtn = parsePDSLine( aLine, raw ); // where the real work is done. } else { // Folder List rtn = parseFolder( aLine, raw ); } return rtn; } /** * Could in theory be used to figure out the format of the date/time except that I'd need time on the server to see if * this actually works that way. For now, we ignore the locale and try to figure out the date format ourselves. */ @Override public void setLocale( Locale arg0 ) { // if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Ignore.Locale" ) ); } } /** * Returns parser name. 
By extensibility oversight in the third-party library we use, this isn't used to match the on * the server (unfortunately). */ public String toString() { return PARSER_KEY; } /************************ Worker Methods *************************/ /** * Parses a Partitioned Dataset Entry, and returns an FTPFile object. * * @param aLine * Split line * @param raw * Unparsed raw string * @return FTPFile unless it's the header row. * @throws ParseException */ protected FTPFile parsePDSLine( String[] aLine, String raw ) throws ParseException { FTPFile rtn = null; if ( aLine[0].equals( HEADER_NAME ) ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.Header" ) ); } return null; } rtn = new FTPFile( raw ); rtn.setName( aLine[0] ); if ( dateTimeFormat == null ) { dateTimeFormat = new SimpleDateFormat( dateFormatString + " HH:mm" ); } rtn.setCreated( dateFormat.parse( aLine[2] ) ); String modDateTime = aLine[3] + ' ' + aLine[4]; rtn.setLastModified( dateTimeFormat.parse( modDateTime ) ); rtn.setDir( false ); return rtn; } /** * Parses a line from a folder listing. * * Note: Returns NULL if it's the header line, if it is ARCIVE or Migrated, if the record format doesn't start with * 'F' or 'V', and if the dsorg doesn't start with 'P'. * * @param aLine * Line split apart * @param raw * Raw line from the transport * @return FTPFile for the line unless it is expressly exluded */ protected FTPFile parseFolder( String[] aLine, String raw ) { if ( aLine[0].equals( HEADER_VOLUME ) ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.Header" ) ); } return null; } // Directory format if ( aLine[0].equals( LINE_TYPE_ARCIVE ) ) { // It's on tape somewhere if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.ARCIVE" ) ); } return null; } if ( aLine[0].equals( LINE_TYPE_MIGRATED ) ) { // It's been moved. 
if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.Migrated" ) ); } return null; } if ( aLine[5].charAt( 0 ) != 'F' && aLine[5].charAt( 0 ) != 'V' ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.recf" ) ); } return null; } if ( aLine[8].charAt( 0 ) != 'P' ) { // Only handle PO, PS, or PO-E if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Skip.dso" ) ); } return null; } // OK, I think I can handle this. FTPFile rtn = new FTPFile( raw ); rtn.setName( aLine[9] ); // Fake out dates - these are all newly created files / folders rtn.setCreated( new Date() ); rtn.setLastModified( new Date() ); if ( aLine[8].equals( ENTRY_FILE_TYPE ) ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Found.File", aLine[9] ) ); } // This is a file... rtn.setDir( false ); long l = -1; try { l = Long.parseLong( aLine[4] ); } catch ( Exception ignored ) { // Ignore errors } rtn.setSize( l ); } else { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Found.Folder", aLine[9] ) ); } rtn.setDir( true ); } // Left this code here in case last time accessed becomes important. // For directory items, this is just the last time accessed // Date dt = dateFormat.parse(aLine[2]); // return rtn; } /************************ Utility Methods *************************/ /** * This is a split + trim function. The String.split method doesn't work well if there are a multiple contiguous * white-space characters. StringTokenizer handles this very well. This should never fail to return an array, even if * the array is empty. In other words, this should never return null. * * @param raw * The string to tokenize from the MainFrame * @return String array of all the elements from the parse. 
*/ protected String[] splitMVSLine( String raw ) { if ( raw == null ) { return new String[] {}; } StringTokenizer st = new StringTokenizer( raw ); String[] rtn = new String[st.countTokens()]; int i = 0; while ( st.hasMoreTokens() ) { String nextToken = st.nextToken(); rtn[i] = nextToken.trim(); i++; } return rtn; } /** * Returns true if this seems to be a recognized MVS folder (not PDS) listing. * * @param listing * @return true if by all appearances this is a listing of an MVS folder */ protected boolean isValidDirectoryFormat( String[] listing ) { String[] aLine; for ( int i = 1; i < listing.length; i++ ) { aLine = splitMVSLine( listing[i] ); if ( ( aLine.length == 2 ) && ( aLine[0].equals( LINE_TYPE_MIGRATED ) ) ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Detected.Migrated" ) ); } } else if ( aLine.length != 10 && ( !aLine[0].equals( LINE_TYPE_ARCIVE ) ) ) { // 10 = regular, ARCIVE=on tape log.logError( BaseMessages.getString( PKG, "MVSFileParser.ERROR.Invalid.Folder.Line", listing[i] ) ); return false; } if ( dateFormatString != null ) { // validate date if ( !checkDateFormat( aLine[2] ) ) { return false; } } else { if ( aLine.length == 10 ) { // Try to parse the date. guessDateFormat( aLine[2] ); } } } return true; } /** * Returns true if this seems to be a recognized MVS PDS listing (not folder). * * @param listing * @return true if by all appearances this is a listing of the contents of a PDS */ protected boolean isValidPDSFormat( String[] listing ) { String[] aLine; for ( int i = 1; i < listing.length; i++ ) { aLine = splitMVSLine( listing[i] ); if ( aLine.length != 9 ) { // 9 because there are two fields for changed... 
log.logError( BaseMessages.getString( PKG, "MVSFileParser.ERROR.Invalid.PDS.Line", listing[i] ) ); return false; } if ( dateFormatString != null ) { if ( !checkDateFormat( aLine[3] ) ) { return false; } } else { guessDateFormat( aLine[2] ); } } return true; } /* * This method will try the date format string to make sure it knows how to parse the dates. If it fails a parse it * will try the alternate format if available. For example, if the first three files have these dates: 2010/03/04 * 2010/07/09 2010/23/06 * * For the first two, either yyyy/MM/dd or yyyy/dd/MM would work. When the parse on 2010/23/06 fails, it will try the * alternate, succeed, and carry on. * * The weakness of this approach is if all files have valid inter- changable day/month on all dates. In that case, all * would be detected as yyyy/MM/dd which may be incorrect. If this is a problem, the correct fix is to set the date * format on the parser, or play with the Locale and see if that can be used to figure out what the real format from * the server is. */ protected boolean checkDateFormat( String dateStr ) { try { dateFormat.parse( dateStr ); } catch ( ParseException ex ) { if ( log.isDebug() ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Date.Parse.Error" ) ); } } if ( ( alternateFormatString != null ) ) { if ( log.isDebug() ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Date.Parse.Choose.Alt" ) ); } } dateFormatString = alternateFormatString; dateFormat = new SimpleDateFormat( dateFormatString ); alternateFormatString = null; try { dateFormat.parse( dateStr ); } catch ( ParseException ex2 ) { return false; } } else { log.logError( BaseMessages.getString( PKG, "MVSFileParser.ERROR.Date.Parse.Fail", dateStr ) ); return false; } } return true; } /** * This method will look at the incoming date string and try to figure out the format of the date. 
Googling on the * internet showed several possible looks to the date: * * dd/MM/yy yy/MM/dd MM/dd/yy yyyy/MM/dd yyyy/dd/MM * * I never saw samples showing dd/MM/yyyy but I suppose it's possible. Not happy with this algorithm because it feels * clumsy. It works, but it's not very elegant (time crunch). * * @param dateStr */ protected void guessDateFormat( String dateStr ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Guess.Date" ) ); } String[] dateSplit = dateStr.split( "/" ); String yrFmt = null; int yrPos = -1; int dayPos = -1; // quick look for either yyyy/xx/xx or xx/xx/yyyy for ( int i = 0; i < dateSplit.length; i++ ) { int aDigit = Integer.parseInt( dateSplit[i] ); if ( dateSplit[i].length() == 4 ) { yrFmt = "yyyy"; yrPos = i; } else if ( aDigit > 31 ) { // found 2-digit year yrFmt = "yy"; yrPos = i; } else if ( aDigit > 12 ) { // definitely found a # <=31, dayPos = i; } } if ( yrFmt != null ) { StringBuffer fmt = new StringBuffer(); if ( dayPos >= 0 ) { // OK, we know everything. String[] tmp = new String[3]; tmp[yrPos] = yrFmt; tmp[dayPos] = "dd"; for ( int i = 0; i < tmp.length; i++ ) { fmt.append( i > 0 ? "/" : "" ); fmt.append( tmp[i] == null ? "MM" : tmp[i] ); } if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Guess.Date.Obvious" ) ); } } else { // OK, we have something like 2010/01/01 - I can't // tell month from day. So, we'll guess. If it doesn't work on a later // date, we'll flip it (the alternate). 
StringBuffer altFmt = new StringBuffer(); if ( yrPos == 0 ) { fmt.append( yrFmt ).append( "/MM/dd" ); altFmt.append( yrFmt ).append( "/dd/MM" ); } else { fmt.append( "MM/dd/" ).append( yrFmt ); altFmt.append( "dd/MM/" ).append( yrFmt ); } this.alternateFormatString = altFmt.toString(); if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Guess.Date.Ambiguous" ) ); } } this.dateFormatString = fmt.toString(); this.dateFormat = new SimpleDateFormat( dateFormatString ); if ( log.isDebug() ) { log.logDebug( BaseMessages .getString( PKG, "MVSFileParser.DEBUG.Guess.Date.Decided", this.dateFormatString ) ); } try { dateFormat.parse( dateStr ); } catch ( ParseException ex ) { if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Guess.Date.Unparsable", dateStr ) ); } } } else { // looks ilke something like 01/02/05 - where's the year? if ( log.isDebug() ) { log.logDebug( BaseMessages.getString( PKG, "MVSFileParser.DEBUG.Guess.Date.Year.Ambiguous" ) ); } return; } } /*************************** Getters and Setters **************************/ /** * @return true if listing is a PDS */ public boolean isPartitionedDataset() { return this.partitionedDataset; } /** * Returns the date format string in use for parsing date in the listing. * * @return string format */ public String getDateFormatString() { return this.dateFormatString; } /** * Provides ability to pre-specify the format that the parser will use to parse dates. * * @param value * the string to set. */ public void setDateFormatString( String value ) { this.dateFormatString = value; } }
apache-2.0
troyliu0105/Meizhi
app/src/main/java/me/drakeet/meizhi/model/Soul.java
1255
/* * Copyright (C) 2015 Drakeet <drakeet.me@gmail.com> * * This file is part of Meizhi * * Meizhi is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Meizhi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Meizhi. If not, see <http://www.gnu.org/licenses/>. */ package me.drakeet.meizhi.model; import com.litesuits.orm.db.annotation.Column; import com.litesuits.orm.db.annotation.NotNull; import com.litesuits.orm.db.annotation.PrimaryKey; import com.litesuits.orm.db.annotation.Unique; import java.io.Serializable; /** * Created by drakeet(http://drakeet.me) * Date: 8/18/15 13:55 */ public class Soul implements Serializable { @PrimaryKey(PrimaryKey.AssignType.AUTO_INCREMENT) @Column("_id") protected long id; @NotNull @Unique @Column("objectId") public String objectId; }
gpl-3.0
Jimexist/presto
presto-main/src/main/java/com/facebook/presto/GroupByHashPageIndexerFactory.java
1786
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto; import com.facebook.presto.spi.Page; import com.facebook.presto.spi.PageIndexer; import com.facebook.presto.spi.PageIndexerFactory; import com.facebook.presto.spi.type.Type; import com.facebook.presto.sql.gen.JoinCompiler; import javax.inject.Inject; import java.util.List; import static java.util.Objects.requireNonNull; public class GroupByHashPageIndexerFactory implements PageIndexerFactory { private final JoinCompiler joinCompiler; @Inject public GroupByHashPageIndexerFactory(JoinCompiler joinCompiler) { this.joinCompiler = requireNonNull(joinCompiler, "joinCompiler is null"); } @Override public PageIndexer createPageIndexer(List<? extends Type> types) { if (types.isEmpty()) { return new NoHashPageIndexer(); } return new GroupByHashPageIndexer(types, joinCompiler); } private static class NoHashPageIndexer implements PageIndexer { @Override public int[] indexPage(Page page) { return new int[page.getPositionCount()]; } @Override public int getMaxIndex() { return 0; } } }
apache-2.0
Soya93/Extract-Refactoring
plugins/git4idea/src/git4idea/push/GitPushNativeResultParser.java
5704
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package git4idea.push; import com.intellij.openapi.diagnostic.Logger; import com.intellij.util.containers.ContainerUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Parses the output received from git push and returns a result. * NB: It is assumed that only one ref is pushed => there is only one result in the output. * * Output format described by git-push man: * <pre> * The status of the push is output in tabular form, with each line representing the status of a single ref. * If --porcelain is used, then each line of the output is of the form: * * &lt;flag&gt; \t &lt;from&gt;:&lt;to&gt; \t &lt;summary&gt; (&lt;reason&gt;) * * The status of up-to-date refs is shown only if --porcelain or --verbose option is used. * * flag * A single character indicating the status of the ref: * (space) * for a successfully pushed fast-forward; * + * for a successful forced update; * - * for a successfully deleted ref; * * * for a successfully pushed new ref; * ! * for a ref that was rejected or failed to push; and * = * for a ref that was up to date and did not need pushing. 
* * summary * For a successfully pushed ref, the summary shows the old and new values of the ref in a form * suitable for using as an argument to git log (this is <old>..<new> in most cases, and * <old>...<new> for forced non-fast-forward updates). * * For a failed update, more details are given: * rejected * Git did not try to send the ref at all, typically because it is not a fast-forward and you * did not force the update. * * remote rejected * The remote end refused the update. Usually caused by a hook on the remote side, or because * the remote repository has one of the following safety options in effect: * receive.denyCurrentBranch (for pushes to the checked out branch), receive.denyNonFastForwards * (for forced non-fast-forward updates), receive.denyDeletes or receive.denyDeleteCurrent. See * git-config(1). * * remote failure * The remote end did not report the successful update of the ref, perhaps because of a * temporary error on the remote side, a break in the network connection, or other transient * error. * * from * The name of the local ref being pushed, minus its refs/<type>/ prefix. In the case of deletion, * the name of the local ref is omitted. * * to * The name of the remote ref being updated, minus its refs/<type>/ prefix. * * reason * A human-readable explanation. In the case of successfully pushed refs, no explanation is needed. * For a failed ref, the reason for failure is described. 
* </pre> */ public class GitPushNativeResultParser { private static final Logger LOG = Logger.getInstance(GitPushNativeResultParser.class); private static final Pattern PATTERN = Pattern.compile("^.*([ +\\-\\*!=])\t" + // flag "(\\S+):(\\S+)\t" + // from:to "([^(]+)" + // summary maybe with a trailing space "(?:\\((.+)\\))?.*$"); // reason private static final Pattern RANGE = Pattern.compile("[0-9a-f]+[\\.]{2,3}[0-9a-f]+"); @NotNull public static List<GitPushNativeResult> parse(@NotNull List<String> output) { List<GitPushNativeResult> results = ContainerUtil.newArrayList(); for (String line : output) { Matcher matcher = PATTERN.matcher(line); if (matcher.matches()) { results.add(parseRefResult(matcher, line)); } } return results; } @Nullable private static GitPushNativeResult parseRefResult(Matcher matcher, String line) { String flag = matcher.group(1); String from = matcher.group(2); String to = matcher.group(3); String summary = matcher.group(4).trim(); // the summary can have a trailing space (to simplify the regexp) @Nullable String reason = matcher.group(5); GitPushNativeResult.Type type = parseType(flag); if (type == null) { LOG.error("Couldn't parse push result type from flag [" + flag + "] in [" + line + "]"); return null; } if (matcher.groupCount() < 4) { return null; } String range = RANGE.matcher(summary).matches() ? summary : null; return new GitPushNativeResult(type, from, reason, range); } private static GitPushNativeResult.Type parseType(String flag) { switch(flag.charAt(0)) { case ' ' : return GitPushNativeResult.Type.SUCCESS; case '+' : return GitPushNativeResult.Type.FORCED_UPDATE; case '-' : return GitPushNativeResult.Type.DELETED; case '*' : return GitPushNativeResult.Type.NEW_REF; case '!' : return GitPushNativeResult.Type.REJECTED; case '=' : return GitPushNativeResult.Type.UP_TO_DATE; } return null; } }
apache-2.0
plxaye/chromium
src/chrome/android/testshell/javatests/src/org/chromium/chrome/testshell/TabShellTabUtils.java
2520
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chrome.testshell; import org.chromium.content.browser.ContentViewClient; import org.chromium.content.browser.test.util.CallbackHelper; import org.chromium.content.browser.test.util.TestCallbackHelperContainer; import org.chromium.content.browser.test.util.TestContentViewClient; import org.chromium.content.browser.test.util.TestContentViewClientWrapper; import org.chromium.content.browser.test.util.TestWebContentsObserver; /** * A utility class that contains methods generic to all Tabs tests. */ public class TabShellTabUtils { private static TestContentViewClient createTestContentViewClientForTab(TestShellTab tab) { ContentViewClient client = tab.getContentView().getContentViewClient(); if (client instanceof TestContentViewClient) return (TestContentViewClient) client; TestContentViewClient testClient = new TestContentViewClientWrapper(client); tab.getContentView().setContentViewClient(testClient); return testClient; } public static class TestCallbackHelperContainerForTab extends TestCallbackHelperContainer implements TestShellTabObserver { private final OnCloseTabHelper mOnCloseTabHelper; public TestCallbackHelperContainerForTab(TestShellTab tab) { super(createTestContentViewClientForTab(tab), new TestWebContentsObserver(tab.getContentView().getContentViewCore())); mOnCloseTabHelper = new OnCloseTabHelper(); tab.addObserver(this); } public static class OnCloseTabHelper extends CallbackHelper { } public OnCloseTabHelper getOnCloseTabHelper() { return mOnCloseTabHelper; } @Override public void onLoadProgressChanged(TestShellTab tab, int progress) { } @Override public void onUpdateUrl(TestShellTab tab, String url) { } @Override public void onCloseTab(TestShellTab tab) { mOnCloseTabHelper.notifyCalled(); } } /** * Creates, binds and returns a TestCallbackHelperContainer for a given 
Tab. */ public static TestCallbackHelperContainerForTab getTestCallbackHelperContainer( final TestShellTab tab) { return tab == null ? null : new TestCallbackHelperContainerForTab(tab); } }
apache-2.0
franz1981/activemq-artemis
artemis-protocols/artemis-openwire-protocol/src/main/java/org/apache/activemq/artemis/core/protocol/openwire/amq/AMQSingleConsumerBrokerExchange.java
1712
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.core.protocol.openwire.amq; import org.apache.activemq.command.MessageAck; import org.apache.activemq.command.MessagePull; public class AMQSingleConsumerBrokerExchange extends AMQConsumerBrokerExchange { private AMQConsumer consumer; public AMQSingleConsumerBrokerExchange(AMQSession amqSession, AMQConsumer consumer) { super(amqSession); this.consumer = consumer; } @Override public void processMessagePull(MessagePull messagePull) throws Exception { consumer.processMessagePull(messagePull); } @Override public void removeConsumer() throws Exception { consumer.removeConsumer(); } @Override public void acknowledge(MessageAck ack) throws Exception { consumer.acknowledge(ack); } @Override public void updateConsumerPrefetchSize(int prefetch) { consumer.setPrefetchSize(prefetch); } }
apache-2.0
fernandozhu/elasticsearch
core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java
6020
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.action; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Base64; import java.util.Collections; import java.util.List; public class ShardValidateQueryRequestTests extends ESTestCase { protected NamedWriteableRegistry namedWriteableRegistry; public void setUp() throws Exception { super.setUp(); IndicesModule indicesModule = new 
IndicesModule(Collections.emptyList()); SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); } public void testSerialize() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); validateQueryRequest.rewrite(true); validateQueryRequest.explain(false); validateQueryRequest.types("type1", "type2"); ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); request.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); readRequest.readFrom(in); assertEquals(request.filteringAliases(), readRequest.filteringAliases()); assertArrayEquals(request.types(), readRequest.types()); assertEquals(request.explain(), readRequest.explain()); assertEquals(request.query(), readRequest.query()); assertEquals(request.rewrite(), readRequest.rewrite()); assertEquals(request.shardId(), readRequest.shardId()); } } } // BWC test for changes from #20916 public void testSerialize50Request() throws IOException { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); validateQueryRequest.rewrite(true); validateQueryRequest.explain(false); validateQueryRequest.types("type1", "type2"); ShardValidateQueryRequest request = new ShardValidateQueryRequest(new 
ShardId("index", "foobar", 1), new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); BytesArray requestBytes = new BytesArray(Base64.getDecoder() // this is a base64 encoded request generated with the same input .decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA")); try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { in.setVersion(Version.V_5_0_0); ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); readRequest.readFrom(in); assertEquals(0, in.available()); assertArrayEquals(request.filteringAliases().getAliases(), readRequest.filteringAliases().getAliases()); expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases().getQueryBuilder()); assertArrayEquals(request.types(), readRequest.types()); assertEquals(request.explain(), readRequest.explain()); assertEquals(request.query(), readRequest.query()); assertEquals(request.rewrite(), readRequest.rewrite()); assertEquals(request.shardId(), readRequest.shardId()); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(Version.V_5_0_0); readRequest.writeTo(output); assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); } } }
apache-2.0
1nv4d3r5/facebook-android-sdk
samples/SessionLoginSample/src/com/facebook/samples/sessionlogin/LoginUsingCustomFragmentActivity.java
960
/** * Copyright 2010-present Facebook. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.samples.sessionlogin; import android.os.Bundle; import android.support.v4.app.FragmentActivity; public class LoginUsingCustomFragmentActivity extends FragmentActivity { public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.custom_fragment_activity); } }
apache-2.0
akosyakov/intellij-community
platform/platform-impl/src/com/intellij/ide/ui/customization/CustomActionsSchema.java
15297
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.ide.ui.customization; import com.intellij.icons.AllIcons; import com.intellij.ide.IdeBundle; import com.intellij.openapi.actionSystem.ActionGroup; import com.intellij.openapi.actionSystem.ActionManager; import com.intellij.openapi.actionSystem.AnAction; import com.intellij.openapi.actionSystem.IdeActions; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.application.PathManager; import com.intellij.openapi.components.ExportableComponent; import com.intellij.openapi.components.ServiceManager; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.keymap.impl.ui.ActionsTreeUtil; import com.intellij.openapi.keymap.impl.ui.Group; import com.intellij.openapi.util.*; import com.intellij.openapi.util.io.FileUtil; import com.intellij.openapi.util.text.StringUtil; import com.intellij.openapi.vfs.VfsUtil; import com.intellij.openapi.vfs.VfsUtilCore; import com.intellij.openapi.wm.ex.WindowManagerEx; import com.intellij.openapi.wm.impl.IdeFrameImpl; import com.intellij.util.ImageLoader; import org.jdom.Element; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import javax.swing.*; import javax.swing.tree.DefaultMutableTreeNode; import java.awt.*; import java.io.File; import java.io.IOException; import java.util.*; import java.util.List; /** * User: anna * Date: Jan 20, 2005 */ public class 
CustomActionsSchema implements ExportableComponent, NamedJDOMExternalizable { @NonNls private static final String ACTIONS_SCHEMA = "custom_actions_schema"; @NonNls private static final String ACTIVE = "active"; @NonNls private static final String ELEMENT_ACTION = "action"; @NonNls private static final String ATTRIBUTE_ID = "id"; @NonNls private static final String ATTRIBUTE_ICON = "icon"; private final Map<String, String> myIconCustomizations = new HashMap<String, String>(); private ArrayList<ActionUrl> myActions = new ArrayList<ActionUrl>(); private final HashMap<String , ActionGroup> myIdToActionGroup = new HashMap<String, ActionGroup>(); private final List<Pair> myIdToNameList = new ArrayList<Pair>(); @NonNls private static final String GROUP = "group"; private static final Logger LOG = Logger.getInstance("#" + CustomActionsSchema.class.getName()); public CustomActionsSchema() { myIdToNameList.add(new Pair(IdeActions.GROUP_MAIN_MENU, ActionsTreeUtil.MAIN_MENU_TITLE)); myIdToNameList.add(new Pair(IdeActions.GROUP_MAIN_TOOLBAR, ActionsTreeUtil.MAIN_TOOLBAR)); myIdToNameList.add(new Pair(IdeActions.GROUP_EDITOR_POPUP, ActionsTreeUtil.EDITOR_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_EDITOR_GUTTER, "Editor Gutter Popup Menu")); myIdToNameList.add(new Pair(IdeActions.GROUP_EDITOR_TAB_POPUP, ActionsTreeUtil.EDITOR_TAB_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_PROJECT_VIEW_POPUP, ActionsTreeUtil.PROJECT_VIEW_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_SCOPE_VIEW_POPUP, "Scope View Popup Menu")); myIdToNameList.add(new Pair(IdeActions.GROUP_FAVORITES_VIEW_POPUP, ActionsTreeUtil.FAVORITES_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_COMMANDER_POPUP, ActionsTreeUtil.COMMANDER_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_J2EE_VIEW_POPUP, ActionsTreeUtil.J2EE_POPUP)); myIdToNameList.add(new Pair(IdeActions.GROUP_NAVBAR_POPUP, "Navigation Bar")); myIdToNameList.add(new Pair("NavBarToolBar", "Navigation Bar Toolbar")); 
CustomizableActionGroupProvider.CustomizableActionGroupRegistrar registrar = new CustomizableActionGroupProvider.CustomizableActionGroupRegistrar() { @Override public void addCustomizableActionGroup(@NotNull String groupId, @NotNull String groupTitle) { myIdToNameList.add(new Pair(groupId, groupTitle)); } }; for (CustomizableActionGroupProvider provider : CustomizableActionGroupProvider.EP_NAME.getExtensions()) { provider.registerGroups(registrar); } } public static CustomActionsSchema getInstance() { return ServiceManager.getService(CustomActionsSchema.class); } public void addAction(ActionUrl url) { myActions.add(url); resortActions(); } public ArrayList<ActionUrl> getActions() { return myActions; } public void setActions(final ArrayList<ActionUrl> actions) { myActions = actions; resortActions(); } public void copyFrom(CustomActionsSchema result) { myIdToActionGroup.clear(); myActions.clear(); myIconCustomizations.clear(); for (ActionUrl actionUrl : result.myActions) { final ActionUrl url = new ActionUrl(new ArrayList<String>(actionUrl.getGroupPath()), actionUrl.getComponent(), actionUrl.getActionType(), actionUrl.getAbsolutePosition()); url.setInitialPosition(actionUrl.getInitialPosition()); myActions.add(url); } resortActions(); myIconCustomizations.putAll(result.myIconCustomizations); } private void resortActions() { Collections.sort(myActions, ActionUrlComparator.INSTANCE); } public boolean isModified(CustomActionsSchema schema) { final ArrayList<ActionUrl> storedActions = schema.getActions(); if (ApplicationManager.getApplication().isUnitTestMode() && !storedActions.isEmpty()) { System.err.println("stored: " + storedActions.toString()); System.err.println("actual: " + getActions().toString()); } if (storedActions.size() != getActions().size()) { return true; } for (int i = 0; i < getActions().size(); i++) { if (!getActions().get(i).equals(storedActions.get(i))) { return true; } } if (schema.myIconCustomizations.size() != myIconCustomizations.size()) return 
true; for (String actionId : myIconCustomizations.keySet()) { if (!Comparing.strEqual(schema.getIconPath(actionId), getIconPath(actionId))) return true; } return false; } public void readExternal(Element element) throws InvalidDataException { DefaultJDOMExternalizer.readExternal(this, element); Element schElement = element; final String activeName = element.getAttributeValue(ACTIVE); if (activeName != null) { for (Element toolbarElement : (Iterable<Element>)element.getChildren(ACTIONS_SCHEMA)) { for (Object o : toolbarElement.getChildren("option")) { if (Comparing.strEqual(((Element)o).getAttributeValue("name"), "myName") && Comparing.strEqual(((Element)o).getAttributeValue("value"), activeName)) { schElement = toolbarElement; break; } } } } for (Object groupElement : schElement.getChildren(GROUP)) { ActionUrl url = new ActionUrl(); url.readExternal((Element)groupElement); myActions.add(url); } if (ApplicationManager.getApplication().isUnitTestMode()) { System.err.println("read custom actions: " + myActions.toString()); } readIcons(element); } public void writeExternal(Element element) throws WriteExternalException { DefaultJDOMExternalizer.writeExternal(this, element); writeActions(element); writeIcons(element); } private void writeActions(Element element) throws WriteExternalException { for (ActionUrl group : myActions) { Element groupElement = new Element(GROUP); group.writeExternal(groupElement); element.addContent(groupElement); } } public AnAction getCorrectedAction(String id) { if (! 
myIdToNameList.contains(new Pair(id, ""))){ return ActionManager.getInstance().getAction(id); } if (myIdToActionGroup.get(id) == null) { for (Pair pair : myIdToNameList) { if (pair.first.equals(id)){ final ActionGroup actionGroup = (ActionGroup)ActionManager.getInstance().getAction(id); if (actionGroup != null) { //J2EE/Commander plugin was disabled myIdToActionGroup.put(id, CustomizationUtil.correctActionGroup(actionGroup, this, pair.second, pair.second)); } } } } return myIdToActionGroup.get(id); } public void resetMainActionGroups() { for (Pair pair : myIdToNameList) { final ActionGroup actionGroup = (ActionGroup)ActionManager.getInstance().getAction(pair.first); if (actionGroup != null) { //J2EE/Commander plugin was disabled myIdToActionGroup.put(pair.first, CustomizationUtil.correctActionGroup(actionGroup, this, pair.second, pair.second)); } } } public void fillActionGroups(DefaultMutableTreeNode root){ final ActionManager actionManager = ActionManager.getInstance(); for (Pair pair : myIdToNameList) { final ActionGroup actionGroup = (ActionGroup)actionManager.getAction(pair.first); if (actionGroup != null) { //J2EE/Commander plugin was disabled root.add(ActionsTreeUtil.createNode(ActionsTreeUtil.createGroup(actionGroup, pair.second, null, null, true, null, false))); } } } public boolean isCorrectActionGroup(ActionGroup group, String defaultGroupName) { if (myActions.isEmpty()){ return false; } final String text = group.getTemplatePresentation().getText(); if (!StringUtil.isEmpty(text)) { for (ActionUrl url : myActions) { if (url.getGroupPath().contains(text) || url.getGroupPath().contains(defaultGroupName)) { return true; } if (url.getComponent() instanceof Group) { final Group urlGroup = (Group)url.getComponent(); String id = urlGroup.getName() != null ? 
urlGroup.getName() : urlGroup.getId(); if (id == null || id.equals(text) || id.equals(defaultGroupName)) { return true; } } } return false; } return true; } public List<ActionUrl> getChildActions(final ActionUrl url) { ArrayList<ActionUrl> result = new ArrayList<ActionUrl>(); final ArrayList<String> groupPath = url.getGroupPath(); for (ActionUrl actionUrl : myActions) { int index = 0; if (groupPath.size() <= actionUrl.getGroupPath().size()){ while (index < groupPath.size()){ if (!Comparing.equal(groupPath.get(index), actionUrl.getGroupPath().get(index))){ break; } index++; } if (index == groupPath.size()){ result.add(actionUrl); } } } return result; } public void removeIconCustomization(String actionId) { myIconCustomizations.remove(actionId); } public void addIconCustomization(String actionId, String iconPath) { myIconCustomizations.put(actionId, iconPath != null ? FileUtil.toSystemIndependentName(iconPath) : null); } public String getIconPath(String actionId) { final String path = myIconCustomizations.get(actionId); return path == null ? 
"" : path; } private void readIcons(Element parent) { for (Object actionO : parent.getChildren(ELEMENT_ACTION)) { Element action = (Element)actionO; final String actionId = action.getAttributeValue(ATTRIBUTE_ID); final String iconPath = action.getAttributeValue(ATTRIBUTE_ICON); if (actionId != null){ myIconCustomizations.put(actionId, iconPath); } } SwingUtilities.invokeLater(new Runnable() { public void run() { initActionIcons(); } }); } private void writeIcons(Element parent) { for (String actionId : myIconCustomizations.keySet()) { Element action = new Element(ELEMENT_ACTION); action.setAttribute(ATTRIBUTE_ID, actionId); String icon = myIconCustomizations.get(actionId); if (icon != null) { action.setAttribute(ATTRIBUTE_ICON, icon); } parent.addContent(action); } } private void initActionIcons() { ActionManager actionManager = ActionManager.getInstance(); for (String actionId : myIconCustomizations.keySet()) { final AnAction anAction = actionManager.getAction(actionId); if (anAction != null) { Icon icon; final String iconPath = myIconCustomizations.get(actionId); if (iconPath != null && new File(FileUtil.toSystemDependentName(iconPath)).exists()) { Image image = null; try { image = ImageLoader.loadFromStream(VfsUtilCore.convertToURL(VfsUtil.pathToUrl(iconPath)).openStream()); } catch (IOException e) { LOG.debug(e); } icon = image != null ? 
IconLoader.getIcon(image) : null; } else { icon = AllIcons.Toolbar.Unknown; } if (anAction.getTemplatePresentation() != null) { anAction.getTemplatePresentation().setIcon(icon); anAction.getTemplatePresentation().setDisabledIcon(IconLoader.getDisabledIcon(icon)); anAction.setDefaultIcon(false); } } } final IdeFrameImpl frame = WindowManagerEx.getInstanceEx().getFrame(null); if (frame != null) { frame.updateView(); } } @NotNull public File[] getExportFiles() { return new File[]{PathManager.getOptionsFile(this)}; } @NotNull public String getPresentableName() { return IdeBundle.message("title.custom.actions.schemas"); } public String getExternalFileName() { return "customization"; } private static class Pair { String first; String second; public Pair(final String first, final String second) { this.first = first; this.second = second; } public int hashCode() { return first.hashCode(); } public boolean equals(Object obj) { return obj instanceof Pair && first.equals(((Pair)obj).first); } } private static class ActionUrlComparator implements Comparator<ActionUrl> { public static ActionUrlComparator INSTANCE = new ActionUrlComparator(); private static final int DELETED = 1; private static final int ADDED = 2; public int compare(ActionUrl u1, ActionUrl u2) { final int w1 = getEquivalenceClass(u1); final int w2 = getEquivalenceClass(u2); if (w1 != w2) { return w1 - w2; // deleted < added < others } if (w1 == DELETED) { return u2.getAbsolutePosition() - u1.getAbsolutePosition(); // within DELETED equivalence class urls with greater position go first } return u1.getAbsolutePosition() - u2.getAbsolutePosition(); // within ADDED equivalence class: urls with lower position go first } private static int getEquivalenceClass(ActionUrl url) { switch (url.getActionType()) { case ActionUrl.DELETED: return 1; case ActionUrl.ADDED: return 2; default: return 3; } } } }
apache-2.0
matyb/java-koans
lib/src/main/java/com/sandwich/util/io/directories/ProductionDirectories.java
214
package com.sandwich.util.io.directories;

/**
 * Directory layout used when the koans run from a packaged ("production")
 * checkout: the project root is {@code koans} and sources live under
 * {@code src}.
 */
public class ProductionDirectories extends DirectorySet {

    /** Name of the koans project root directory. */
    private static final String PROJECT_DIR = "koans";

    /** Name of the source directory. */
    private static final String SOURCE_DIR = "src";

    /** Returns the project root directory name ({@code "koans"}). */
    public String getProjectDir() {
        return PROJECT_DIR;
    }

    /** Returns the source directory name ({@code "src"}). */
    public String getSourceDir() {
        return SOURCE_DIR;
    }
}
apache-2.0
eric-kansas/robolectric
robolectric/src/test/java/org/robolectric/internal/bytecode/ShadowWranglerTest.java
9800
package org.robolectric.internal.bytecode;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.TestRunners;
import org.robolectric.annotation.Config;
import org.robolectric.internal.bytecode.testing.Foo;
import org.robolectric.internal.bytecode.testing.ShadowFoo;
import org.robolectric.annotation.Implementation;
import org.robolectric.annotation.Implements;
import org.robolectric.annotation.internal.Instrument;
import org.robolectric.annotation.RealObject;
import org.robolectric.internal.ShadowExtractor;

import java.io.IOException;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.*;

/**
 * Tests for ShadowWrangler: shadow construction and selection, method/equals/hashCode/toString
 * delegation, stack-trace cleanup, shadow inheritance, and loose signature matching.
 * Fixture classes ({@code @Instrument} targets and their {@code @Implements} shadows) are
 * declared as static nested classes below the tests that use them.
 */
@RunWith(TestRunners.WithoutDefaults.class)
public class ShadowWranglerTest {
  // Shared constructor argument passed to Foo in most tests.
  private String name;

  @Before
  public void setUp() throws Exception {
    name = "context";
  }

  @Test
  @Config(shadows = {ShadowForAClassWithDefaultConstructor_HavingNoConstructorDelegate.class})
  public void testConstructorInvocation_WithDefaultConstructorAndNoConstructorDelegateOnShadowClass() throws Exception {
    AClassWithDefaultConstructor instance = new AClassWithDefaultConstructor();
    assertThat(ShadowExtractor.extract(instance)).isExactlyInstanceOf(ShadowForAClassWithDefaultConstructor_HavingNoConstructorDelegate.class);
    // The real constructor must still run even though the shadow has no __constructor__.
    assertThat(instance.initialized).isTrue();
  }

  @Test
  @Config(shadows = { ShadowFoo.class })
  public void testConstructorInvocation() throws Exception {
    Foo foo = new Foo(name);
    assertSame(name, shadowOf(foo).name);
  }

  @Test
  @Config(shadows = {ShadowFoo.class})
  public void testRealObjectAnnotatedFieldsAreSetBeforeConstructorIsCalled() throws Exception {
    Foo foo = new Foo(name);
    assertSame(name, shadowOf(foo).name);
    // @RealObject fields must already be populated when __constructor__ runs,
    // in both the shadow class and its shadow superclass.
    assertSame(foo, shadowOf(foo).realFooField);
    assertSame(foo, shadowOf(foo).realFooInConstructor);
    assertSame(foo, shadowOf(foo).realFooInParentConstructor);
  }

  @Test
  @Config(shadows = {ShadowFoo.class})
  public void testMethodDelegation() throws Exception {
    Foo foo = new Foo(name);
    assertSame(name, foo.getName());
  }

  @Test
  @Config(shadows = {WithEquals.class})
  public void testEqualsMethodDelegation() throws Exception {
    Foo foo1 = new Foo(name);
    Foo foo2 = new Foo(name);
    assertEquals(foo1, foo2);
  }

  @Test
  @Config(shadows = {WithEquals.class})
  public void testHashCodeMethodDelegation() throws Exception {
    Foo foo = new Foo(name);
    assertEquals(42, foo.hashCode());
  }

  @Test
  @Config(shadows = {WithToString.class})
  public void testToStringMethodDelegation() throws Exception {
    Foo foo = new Foo(name);
    assertEquals("the expected string", foo.toString());
  }

  @Test
  @Config(shadows = {ShadowFoo.class})
  public void testShadowSelectionSearchesSuperclasses() throws Exception {
    // TextFoo has no shadow of its own here, so the wrangler walks up to Foo's shadow.
    TextFoo textFoo = new TextFoo(name);
    assertEquals(ShadowFoo.class, ShadowExtractor.extract(textFoo).getClass());
  }

  @Test
  @Config(shadows = {ShadowFoo.class, ShadowTextFoo.class})
  public void shouldUseMostSpecificShadow() throws Exception {
    TextFoo textFoo = new TextFoo(name);
    assertThat(shadowOf(textFoo)).isInstanceOf(ShadowTextFoo.class);
  }

  @Test
  public void testPrimitiveArrays() throws Exception {
    // ShadowWrangler.loadClass must understand source-style array names like "int[]".
    Class<?> objArrayClass = ShadowWrangler.loadClass("java.lang.Object[]", getClass().getClassLoader());
    assertTrue(objArrayClass.isArray());
    assertEquals(Object.class, objArrayClass.getComponentType());
    Class<?> intArrayClass = ShadowWrangler.loadClass("int[]", getClass().getClassLoader());
    assertTrue(intArrayClass.isArray());
    assertEquals(Integer.TYPE, intArrayClass.getComponentType());
  }

  @Test
  @Config(shadows = ShadowThrowInShadowMethod.class)
  public void shouldRemoveNoiseFromShadowedStackTraces() throws Exception {
    ThrowInShadowMethod instance = new ThrowInShadowMethod();
    Exception e = null;
    try {
      instance.method();
    } catch (Exception e1) {
      e = e1;
    }
    assertNotNull(e);
    assertEquals(IOException.class, e.getClass());
    assertEquals("fake exception", e.getMessage());
    StackTraceElement[] stackTrace = e.getStackTrace();
    // Frame 0: the shadow method that actually threw (real line number).
    assertThat(stackTrace[0].getClassName()).isEqualTo(ShadowThrowInShadowMethod.class.getName());
    assertThat(stackTrace[0].getMethodName()).isEqualTo("method");
    assertThat(stackTrace[0].getLineNumber()).isGreaterThan(0);
    // Frame 1: the instrumented real method; its synthetic frame has a negative line number.
    assertThat(stackTrace[1].getClassName()).isEqualTo(ThrowInShadowMethod.class.getName());
    assertThat(stackTrace[1].getMethodName()).isEqualTo("method");
    assertThat(stackTrace[1].getLineNumber()).isLessThan(0);
    // Frame 2: this test method; all wrangler plumbing in between must be stripped.
    assertThat(stackTrace[2].getClassName()).isEqualTo(ShadowWranglerTest.class.getName());
    assertThat(stackTrace[2].getMethodName()).isEqualTo("shouldRemoveNoiseFromShadowedStackTraces");
    assertThat(stackTrace[2].getLineNumber()).isGreaterThan(0);
  }

  @Instrument
  public static class ThrowInShadowMethod {
    public void method() throws IOException {
    }
  }

  @Implements(ThrowInShadowMethod.class)
  public static class ShadowThrowInShadowMethod {
    public void method() throws IOException {
      throw new IOException("fake exception");
    }
  }

  @Test
  @Config(shadows = ShadowThrowInRealMethod.class)
  public void shouldRemoveNoiseFromUnshadowedStackTraces() throws Exception {
    ThrowInRealMethod instance = new ThrowInRealMethod();
    Exception e = null;
    try {
      instance.method();
    } catch (Exception e1) {
      e = e1;
    }
    assertNotNull(e);
    assertEquals(IOException.class, e.getClass());
    assertEquals("fake exception", e.getMessage());
    StackTraceElement[] stackTrace = e.getStackTrace();
    // With an empty shadow the real method throws directly; only two meaningful frames remain.
    assertThat(stackTrace[0].getClassName()).isEqualTo(ThrowInRealMethod.class.getName());
    assertThat(stackTrace[0].getMethodName()).isEqualTo("method");
    assertThat(stackTrace[0].getLineNumber()).isGreaterThan(0);
    assertThat(stackTrace[1].getClassName()).isEqualTo(ShadowWranglerTest.class.getName());
    assertThat(stackTrace[1].getMethodName()).isEqualTo("shouldRemoveNoiseFromUnshadowedStackTraces");
    assertThat(stackTrace[1].getLineNumber()).isGreaterThan(0);
  }

  @Instrument
  public static class ThrowInRealMethod {
    public void method() throws IOException {
      throw new IOException("fake exception");
    }
  }

  @Implements(ThrowInRealMethod.class)
  public static class ShadowThrowInRealMethod {
  }

  @Test
  @Config(shadows = {ShadowOfChildWithInheritance.class, ShadowOfParent.class})
  public void whenInheritanceIsEnabled_shouldUseShadowSuperclassMethods() throws Exception {
    assertThat(new Child().get()).isEqualTo("from shadow of parent");
  }

  // NOTE(review): despite the "shouldUse..." name, this asserts the child's real
  // implementation runs when inheritance is disabled — consider renaming to
  // whenInheritanceIsDisabled_shouldNotUseShadowSuperclassMethods.
  @Test
  @Config(shadows = {ShadowOfChildWithoutInheritance.class, ShadowOfParent.class})
  public void whenInheritanceIsDisabled_shouldUseShadowSuperclassMethods() throws Exception {
    assertThat(new Child().get()).isEqualTo("from child (from shadow of parent)");
  }

  @Instrument
  public static class Parent {
    public String get() {
      return "from parent";
    }
  }

  @Instrument
  public static class Child extends Parent {
    public String get() {
      return "from child (" + super.get() + ")";
    }
  }

  @Implements(Parent.class)
  public static class ShadowOfParent {
    @Implementation
    public String get() {
      return "from shadow of parent";
    }
  }

  @Implements(value = Child.class, inheritImplementationMethods = true)
  public static class ShadowOfChildWithInheritance extends ShadowOfParent {
  }

  @Implements(value = Child.class, inheritImplementationMethods = false)
  public static class ShadowOfChildWithoutInheritance extends ShadowOfParent {
  }

  // Convenience casts around ShadowExtractor.extract().
  private ShadowFoo shadowOf(Foo foo) {
    return (ShadowFoo) ShadowExtractor.extract(foo);
  }

  private ShadowTextFoo shadowOf(TextFoo foo) {
    return (ShadowTextFoo) ShadowExtractor.extract(foo);
  }

  @Implements(Foo.class)
  public static class WithEquals {
    @SuppressWarnings("UnusedDeclaration")
    public void __constructor__(String s) {
    }

    @Override
    public boolean equals(Object o) {
      return true;
    }

    @Override
    public int hashCode() {
      return 42;
    }
  }

  @Implements(Foo.class)
  public static class WithToString {
    @SuppressWarnings("UnusedDeclaration")
    public void __constructor__(String s) {
    }

    @Override
    public String toString() {
      return "the expected string";
    }
  }

  @Implements(TextFoo.class)
  public static class ShadowTextFoo extends ShadowFoo {
  }

  @Instrument
  public static class TextFoo extends Foo {
    public TextFoo(String s) {
      super(s);
    }
  }

  @Implements(Foo.class)
  public static class ShadowFooParent {
    @RealObject
    private Foo realFoo;
    // Captured during __constructor__ to prove @RealObject is injected before it runs.
    public Foo realFooInParentConstructor;

    public void __constructor__(String name) {
      realFooInParentConstructor = realFoo;
    }
  }

  @Instrument
  public static class AClassWithDefaultConstructor {
    public boolean initialized;

    public AClassWithDefaultConstructor() {
      initialized = true;
    }
  }

  @Implements(AClassWithDefaultConstructor.class)
  public static class ShadowForAClassWithDefaultConstructor_HavingNoConstructorDelegate {
  }

  @Config(shadows = ShadowAClassWithDifficultArgs.class)
  @Test
  public void shouldAllowLooseSignatureMatches() throws Exception {
    // looseSignatures lets Object params match the real CharSequence signature.
    assertThat(new AClassWithDifficultArgs().aMethod("bc")).isEqualTo("abc");
  }

  @Implements(value = AClassWithDifficultArgs.class, looseSignatures = true)
  public static class ShadowAClassWithDifficultArgs {
    @Implementation
    public Object aMethod(Object s) {
      return "a" + s;
    }
  }

  @Instrument
  public static class AClassWithDifficultArgs {
    public CharSequence aMethod(CharSequence s) {
      return s;
    }
  }
}
mit
y0ke/actor-platform
actor-sdk/sdk-core/core/core-shared/src/main/java/im/actor/core/api/updates/UpdateGroupTitleChanged.java
2554
package im.actor.core.api.updates;
/*
 *  Generated by the Actor API Scheme generator.  DO NOT EDIT!
 */

import im.actor.runtime.bser.*;
import im.actor.runtime.collections.*;
import static im.actor.runtime.bser.Utils.*;
import im.actor.core.network.parser.*;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.NotNull;
import com.google.j2objc.annotations.ObjectiveCName;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import im.actor.core.api.*;

/**
 * Sequence update delivered when a group's title is changed.
 * NOTE: generated code — field tags in parse()/serialize() must match the API scheme,
 * so keep any regeneration in sync rather than editing by hand.
 */
public class UpdateGroupTitleChanged extends Update {

    // Wire header identifying this update type.
    public static final int HEADER = 0x26;

    public static UpdateGroupTitleChanged fromBytes(byte[] data) throws IOException {
        return Bser.parse(new UpdateGroupTitleChanged(), data);
    }

    private int groupId;
    private long rid;
    private int uid;
    private String title;
    private long date;

    public UpdateGroupTitleChanged(int groupId, long rid, int uid, @NotNull String title, long date) {
        this.groupId = groupId;
        this.rid = rid;
        this.uid = uid;
        this.title = title;
        this.date = date;
    }

    // No-arg constructor used by Bser.parse() before parse() fills the fields.
    public UpdateGroupTitleChanged() {

    }

    public int getGroupId() {
        return this.groupId;
    }

    public long getRid() {
        return this.rid;
    }

    public int getUid() {
        return this.uid;
    }

    @NotNull
    public String getTitle() {
        return this.title;
    }

    public long getDate() {
        return this.date;
    }

    @Override
    public void parse(BserValues values) throws IOException {
        this.groupId = values.getInt(1);
        this.rid = values.getLong(5);
        this.uid = values.getInt(2);
        this.title = values.getString(3);
        this.date = values.getLong(4);
    }

    @Override
    public void serialize(BserWriter writer) throws IOException {
        writer.writeInt(1, this.groupId);
        writer.writeLong(5, this.rid);
        writer.writeInt(2, this.uid);
        // title is declared @NotNull; a null here means the object was built via the
        // no-arg constructor and never parsed, so serialization must fail.
        if (this.title == null) {
            throw new IOException();
        }
        writer.writeString(3, this.title);
        writer.writeLong(4, this.date);
    }

    @Override
    public String toString() {
        String res = "update GroupTitleChanged{";
        res += "groupId=" + this.groupId;
        res += ", rid=" + this.rid;
        res += ", uid=" + this.uid;
        res += ", title=" + this.title;
        res += ", date=" + this.date;
        res += "}";
        return res;
    }

    @Override
    public int getHeaderKey() {
        return HEADER;
    }
}
agpl-3.0
Letractively/owasp-esapi-java
src/test/java/org/owasp/esapi/waf/internal/InterceptingHttpServletRequestTest.java
2042
/** * OWASP Enterprise Security API (ESAPI) * * This file is part of the Open Web Application Security Project (OWASP) * Enterprise Security API (ESAPI) project. For details, please see * <a href="http://www.owasp.org/index.php/ESAPI">http://www.owasp.org/index.php/ESAPI</a>. * * Copyright (c) 2007 - The OWASP Foundation * * The ESAPI is published by OWASP under the BSD license. You should read and accept the * LICENSE before you use, modify, and/or redistribute this software. * * @author Jeff Williams <a href="http://www.aspectsecurity.com">Aspect Security</a> * @created 2007 */ package org.owasp.esapi.waf.internal; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import org.owasp.esapi.http.MockHttpServletRequest; /** * @author Jeff Williams (jeff.williams@aspectsecurity.com) */ public class InterceptingHttpServletRequestTest extends TestCase { /** * Instantiates a new test. * * @param testName * the test name */ public InterceptingHttpServletRequestTest(String testName) { super(testName); } /** * {@inheritDoc} * @throws Exception */ protected void setUp() throws Exception { // none } /** * {@inheritDoc} * @throws Exception */ protected void tearDown() throws Exception { // none } /** * Suite. * * @return the test */ public static Test suite() { TestSuite suite = new TestSuite(InterceptingHttpServletRequestTest.class); return suite; } /** * Test. */ public void testRequest() throws Exception { System.out.println("InterceptingHTTPServletRequest"); MockHttpServletRequest mreq = new MockHttpServletRequest(); mreq.setMethod( "GET" ); InterceptingHTTPServletRequest ireq = new InterceptingHTTPServletRequest(mreq); assertEquals( mreq.getMethod(), ireq.getMethod() ); } }
bsd-3-clause
huangsongyan/hellocharts-android
hellocharts-samples/src/lecho/lib/hellocharts/samples/PreviewLineChartActivity.java
6979
package lecho.lib.hellocharts.samples;

import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.app.ActionBarActivity;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;

import java.util.ArrayList;
import java.util.List;

import lecho.lib.hellocharts.gesture.ZoomType;
import lecho.lib.hellocharts.listener.ViewportChangeListener;
import lecho.lib.hellocharts.model.Axis;
import lecho.lib.hellocharts.model.Line;
import lecho.lib.hellocharts.model.LineChartData;
import lecho.lib.hellocharts.model.PointValue;
import lecho.lib.hellocharts.model.Viewport;
import lecho.lib.hellocharts.util.ChartUtils;
import lecho.lib.hellocharts.view.LineChartView;
import lecho.lib.hellocharts.view.PreviewLineChartView;

/**
 * Sample activity: a main line chart whose visible range is driven by a smaller
 * "preview" chart below it. Panning/zooming the preview chart updates the main
 * chart's viewport via {@link PlaceholderFragment.ViewportListener}.
 */
public class PreviewLineChartActivity extends ActionBarActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_preview_line_chart);
        // Only add the fragment on first creation; on rotation it is restored automatically.
        if (savedInstanceState == null) {
            getSupportFragmentManager().beginTransaction().add(R.id.container, new PlaceholderFragment()).commit();
        }
    }

    /**
     * A fragment containing a line chart and preview line chart.
     */
    public static class PlaceholderFragment extends Fragment {

        private LineChartView chart;
        private PreviewLineChartView previewChart;
        private LineChartData data;
        /**
         * Deep copy of data.
         */
        private LineChartData previewData;

        public PlaceholderFragment() {
        }

        @Override
        public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
            setHasOptionsMenu(true);
            View rootView = inflater.inflate(R.layout.fragment_preview_line_chart, container, false);

            chart = (LineChartView) rootView.findViewById(R.id.chart);
            previewChart = (PreviewLineChartView) rootView.findViewById(R.id.chart_preview);

            // Generate data for previewed chart and copy of that data for preview chart.
            generateDefaultData();
            chart.setLineChartData(data);

            // Disable zoom/scroll for previewed chart, visible chart ranges depends on preview chart viewport so
            // zoom/scroll is unnecessary.
            chart.setZoomEnabled(false);
            chart.setScrollEnabled(false);

            previewChart.setLineChartData(previewData);
            previewChart.setViewportChangeListener(new ViewportListener());

            // Start with horizontal-only preview; no animation during initial layout.
            previewX(false);

            return rootView;
        }

        // MENU

        @Override
        public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
            inflater.inflate(R.menu.preview_line_chart, menu);
        }

        @Override
        public boolean onOptionsItemSelected(MenuItem item) {
            int id = item.getItemId();
            if (id == R.id.action_reset) {
                // Regenerate data and reset both charts to the horizontal preview mode.
                generateDefaultData();
                chart.setLineChartData(data);
                previewChart.setLineChartData(previewData);
                previewX(true);
                return true;
            }
            if (id == R.id.action_preview_both) {
                previewXY();
                previewChart.setZoomType(ZoomType.HORIZONTAL_AND_VERTICAL);
                return true;
            }
            if (id == R.id.action_preview_horizontal) {
                previewX(true);
                return true;
            }
            if (id == R.id.action_preview_vertical) {
                previewY();
                return true;
            }
            if (id == R.id.action_change_color) {
                // Pick a random preview color different from the current one.
                int color = ChartUtils.pickColor();
                while (color == previewChart.getPreviewColor()) {
                    color = ChartUtils.pickColor();
                }
                previewChart.setPreviewColor(color);
                return true;
            }
            return super.onOptionsItemSelected(item);
        }

        // Builds 50 random points for the main chart and a darker deep copy for the preview.
        private void generateDefaultData() {
            int numValues = 50;

            List<PointValue> values = new ArrayList<PointValue>();
            for (int i = 0; i < numValues; ++i) {
                values.add(new PointValue(i, (float) Math.random() * 100f));
            }

            Line line = new Line(values);
            line.setColor(ChartUtils.COLOR_GREEN);
            line.setHasPoints(false);// too many values so don't draw points.

            List<Line> lines = new ArrayList<Line>();
            lines.add(line);

            data = new LineChartData(lines);
            data.setAxisXBottom(new Axis());
            data.setAxisYLeft(new Axis().setHasLines(true));

            // prepare preview data, is better to use separate deep copy for preview chart.
            // Set color to grey to make preview area more visible.
            previewData = new LineChartData(data);
            previewData.getLines().get(0).setColor(ChartUtils.DEFAULT_DARKEN_COLOR);
        }

        // Shrinks the preview viewport vertically to half height and enables vertical zoom.
        private void previewY() {
            Viewport tempViewport = new Viewport(chart.getMaximumViewport());
            float dy = tempViewport.height() / 4;
            tempViewport.inset(0, dy);
            previewChart.setCurrentViewportWithAnimation(tempViewport);
            previewChart.setZoomType(ZoomType.VERTICAL);
        }

        // Shrinks the preview viewport horizontally to half width and enables horizontal zoom.
        private void previewX(boolean animate) {
            Viewport tempViewport = new Viewport(chart.getMaximumViewport());
            float dx = tempViewport.width() / 4;
            tempViewport.inset(dx, 0);
            if (animate) {
                previewChart.setCurrentViewportWithAnimation(tempViewport);
            } else {
                previewChart.setCurrentViewport(tempViewport);
            }
            previewChart.setZoomType(ZoomType.HORIZONTAL);
        }

        private void previewXY() {
            // Better to not modify viewport of any chart directly so create a copy.
            Viewport tempViewport = new Viewport(chart.getMaximumViewport());
            // Make temp viewport smaller.
            float dx = tempViewport.width() / 4;
            float dy = tempViewport.height() / 4;
            tempViewport.inset(dx, dy);
            previewChart.setCurrentViewportWithAnimation(tempViewport);
        }

        /**
         * Viewport listener for preview chart(lower one). in {@link #onViewportChanged(Viewport)} method change
         * viewport of upper chart.
         */
        private class ViewportListener implements ViewportChangeListener {

            @Override
            public void onViewportChanged(Viewport newViewport) {
                // don't use animation, it is unnecessary when using preview chart.
                chart.setCurrentViewport(newViewport);
            }
        }
    }
}
apache-2.0
combinatorist/elasticsearch
src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
10781
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

/**
 * A Query that matches documents within an range of terms.
 *
 * Fluent builder: the primitive/String overloads of from/to/gt/gte/lt/lte all
 * store into the same {@code from}/{@code to} Object fields; gt/lt additionally
 * clear the corresponding include flag, gte/lte set it.
 */
public class RangeQueryBuilder extends BaseQueryBuilder implements MultiTermQueryBuilder, BoostableQueryBuilder<RangeQueryBuilder> {

    private final String name;

    private Object from;

    private Object to;

    private String timeZone;

    // Bounds are inclusive by default.
    private boolean includeLower = true;

    private boolean includeUpper = true;

    // -1 is the "unset" sentinel; see doXContent.
    private float boost = -1;

    private String queryName;

    /**
     * A Query that matches documents within an range of terms.
     *
     * @param name The field name
     */
    public RangeQueryBuilder(String name) {
        this.name = name;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(Object from) {
        this.from = from;
        return this;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(String from) {
        this.from = from;
        return this;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(int from) {
        this.from = from;
        return this;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(long from) {
        this.from = from;
        return this;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(float from) {
        this.from = from;
        return this;
    }

    /** The from part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder from(double from) {
        this.from = from;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(String from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(Object from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(int from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(long from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(float from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Exclusive lower bound: sets from and marks the lower bound excluded. */
    public RangeQueryBuilder gt(double from) {
        this.from = from;
        this.includeLower = false;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(String from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(Object from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(int from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(long from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(float from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** Inclusive lower bound: sets from and marks the lower bound included. */
    public RangeQueryBuilder gte(double from) {
        this.from = from;
        this.includeLower = true;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(Object to) {
        this.to = to;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(String to) {
        this.to = to;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(int to) {
        this.to = to;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(long to) {
        this.to = to;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(float to) {
        this.to = to;
        return this;
    }

    /** The to part of the range query. Null indicates unbounded. */
    public RangeQueryBuilder to(double to) {
        this.to = to;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(String to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(Object to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(int to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(long to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(float to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Exclusive upper bound: sets to and marks the upper bound excluded. */
    public RangeQueryBuilder lt(double to) {
        this.to = to;
        this.includeUpper = false;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(String to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(Object to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(int to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(long to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(float to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Inclusive upper bound: sets to and marks the upper bound included. */
    public RangeQueryBuilder lte(double to) {
        this.to = to;
        this.includeUpper = true;
        return this;
    }

    /** Should the lower bound be included or not. Defaults to <tt>true</tt>. */
    public RangeQueryBuilder includeLower(boolean includeLower) {
        this.includeLower = includeLower;
        return this;
    }

    /** Should the upper bound be included or not. Defaults to <tt>true</tt>. */
    public RangeQueryBuilder includeUpper(boolean includeUpper) {
        this.includeUpper = includeUpper;
        return this;
    }

    /**
     * Sets the boost for this query. Documents matching this query will (in addition to the normal
     * weightings) have their score multiplied by the boost provided.
     */
    public RangeQueryBuilder boost(float boost) {
        this.boost = boost;
        return this;
    }

    /** Sets the query name for the filter that can be used when searching for matched_filters per hit. */
    public RangeQueryBuilder queryName(String queryName) {
        this.queryName = queryName;
        return this;
    }

    /** In case of date field, we can adjust the from/to fields using a timezone */
    public RangeQueryBuilder timeZone(String preZone) {
        this.timeZone = preZone;
        return this;
    }

    // Serializes the query to the ES wire format:
    // { "range": { <field>: { "from": .., "to": .., ... } } }
    @Override
    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(RangeQueryParser.NAME);
        builder.startObject(name);
        // from/to are always emitted; a null value means "unbounded" on the wire.
        builder.field("from", from);
        builder.field("to", to);
        if (timeZone != null) {
            builder.field("time_zone", timeZone);
        }
        builder.field("include_lower", includeLower);
        builder.field("include_upper", includeUpper);
        // boost == -1 is the unset sentinel, so an explicit boost(-1) would be dropped here.
        if (boost != -1) {
            builder.field("boost", boost);
        }
        if (queryName != null) {
            builder.field("_name", queryName);
        }
        builder.endObject();
        builder.endObject();
    }
}
apache-2.0
iKrelve/DexHunter
dalvik/dexgen/src/com/android/dexgen/dex/code/form/Form31c.java
3714
/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.dexgen.dex.code.form;

import com.android.dexgen.dex.code.CstInsn;
import com.android.dexgen.dex.code.DalvInsn;
import com.android.dexgen.dex.code.InsnFormat;
import com.android.dexgen.rop.code.RegisterSpec;
import com.android.dexgen.rop.code.RegisterSpecList;
import com.android.dexgen.rop.cst.Constant;
import com.android.dexgen.rop.cst.CstFieldRef;
import com.android.dexgen.rop.cst.CstString;
import com.android.dexgen.rop.cst.CstType;
import com.android.dexgen.util.AnnotatedOutput;

/**
 * Instruction format {@code 31c}. See the instruction format spec
 * for details.
 *
 * Layout: one opcode unit carrying an 8-bit register, followed by a
 * 32-bit constant-pool index split across two 16-bit code units.
 */
public final class Form31c extends InsnFormat {
    /** {@code non-null;} unique instance of this class */
    public static final InsnFormat THE_ONE = new Form31c();

    /**
     * Constructs an instance. This class is not publicly
     * instantiable. Use {@link #THE_ONE}.
     */
    private Form31c() {
        // This space intentionally left blank.
    }

    /** {@inheritDoc} */
    @Override
    public String insnArgString(DalvInsn insn) {
        RegisterSpecList regs = insn.getRegisters();
        return regs.get(0).regString() + ", " + cstString(insn);
    }

    /** {@inheritDoc} */
    @Override
    public String insnCommentString(DalvInsn insn, boolean noteIndices) {
        if (noteIndices) {
            return cstComment(insn);
        } else {
            return "";
        }
    }

    /** {@inheritDoc} */
    @Override
    public int codeSize() {
        // 3 x 16-bit code units: opcode+register, then the low and high
        // halves of the 32-bit constant index.
        return 3;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isCompatible(DalvInsn insn) {
        if (!(insn instanceof CstInsn)) {
            return false;
        }

        RegisterSpecList regs = insn.getRegisters();
        RegisterSpec reg;

        switch (regs.size()) {
            case 1: {
                reg = regs.get(0);
                break;
            }
            case 2: {
                /*
                 * This format is allowed for ops that are effectively
                 * 2-arg but where the two args are identical.
                 */
                reg = regs.get(0);
                if (reg.getReg() != regs.get(1).getReg()) {
                    return false;
                }
                break;
            }
            default: {
                return false;
            }
        }

        // The register number must fit in the single unsigned byte this format provides.
        if (!unsignedFitsInByte(reg.getReg())) {
            return false;
        }

        CstInsn ci = (CstInsn) insn;
        Constant cst = ci.getConstant();

        // Only type/field/string constants are addressable through this format.
        return ((cst instanceof CstType) ||
                (cst instanceof CstFieldRef) ||
                (cst instanceof CstString));
    }

    /** {@inheritDoc} */
    @Override
    public InsnFormat nextUp() {
        // No larger fallback format exists for this shape of instruction.
        return null;
    }

    /** {@inheritDoc} */
    @Override
    public void writeTo(AnnotatedOutput out, DalvInsn insn) {
        RegisterSpecList regs = insn.getRegisters();
        int cpi = ((CstInsn) insn).getIndex();

        // Emit opcode+register, then the constant-pool index low half first,
        // high half second (little-endian unit order).
        write(out,
              opcodeUnit(insn, regs.get(0).getReg()),
              (short) cpi,
              (short) (cpi >> 16));
    }
}
apache-2.0
jowiho/openhab
bundles/binding/org.openhab.binding.tinkerforge/src/main/java/org/openhab/binding/tinkerforge/internal/types/PercentValue.java
718
/**
 * Copyright (c) 2010-2016 by the respective copyright holders.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 */
package org.openhab.binding.tinkerforge.internal.types;

import java.math.BigDecimal;

import org.openhab.core.library.types.PercentType;

/**
 * A percentage value produced by the TinkerForge binding, exposed to openHAB
 * as a standard {@link PercentType} state while also being tagged as a
 * {@link TinkerforgeValue}.
 */
public class PercentValue extends PercentType implements TinkerforgeValue {

    private static final long serialVersionUID = 8087283524157935305L;

    /**
     * Creates a percent value from the given decimal.
     *
     * @param bigDecimal the percentage, delegated to {@link PercentType}
     */
    public PercentValue(BigDecimal bigDecimal) {
        super(bigDecimal);
    }
}
epl-1.0
a12n/godot
platform/android/java/src/com/android/godot/payments/ReleaseAllConsumablesTask.java
2648
package com.android.godot.payments;

import java.util.ArrayList;

import org.json.JSONException;
import org.json.JSONObject;

import com.android.godot.Dictionary;
import com.android.godot.Godot;
import com.android.vending.billing.IInAppBillingService;

import android.content.Context;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.RemoteException;
import android.util.Log;

/**
 * Fetches every owned "inapp" purchase from the in-app billing service and
 * consumes each one via a {@link GenericConsumeTask}.
 *
 * Subclasses receive a callback per consumed item through {@link #success},
 * or {@link #notRequired()} when there is nothing to consume.
 */
abstract public class ReleaseAllConsumablesTask {

    private final Context context;
    private final IInAppBillingService mService;

    public ReleaseAllConsumablesTask(IInAppBillingService mService, Context context) {
        this.context = context;
        this.mService = mService;
    }

    /**
     * Queries the billing service (API v3) for all outstanding "inapp"
     * purchases and schedules a consume task for each of them.
     *
     * Calls {@link #notRequired()} when the purchase list is empty or absent.
     * Any failure while talking to the service is logged and swallowed.
     */
    public void consumeItAll() {
        try {
            Bundle bundle = mService.getPurchases(3, context.getPackageName(), "inapp", null);

            if (bundle.getInt("RESPONSE_CODE") == 0) {

                final ArrayList<String> myPurchases = bundle.getStringArrayList("INAPP_PURCHASE_DATA_LIST");
                final ArrayList<String> mySignatures = bundle.getStringArrayList("INAPP_DATA_SIGNATURE_LIST");
                // NOTE(review): mySignatures is assumed to be non-null and
                // index-aligned with myPurchases whenever RESPONSE_CODE is 0 —
                // TODO confirm against the billing API contract.

                if (myPurchases == null || myPurchases.size() == 0) {
                    notRequired();
                    return;
                }

                for (int i = 0; i < myPurchases.size(); i++) {
                    try {
                        String receipt = myPurchases.get(i);
                        JSONObject inappPurchaseData = new JSONObject(receipt);
                        String sku = inappPurchaseData.getString("productId");
                        String token = inappPurchaseData.getString("purchaseToken");
                        String signature = mySignatures.get(i);

                        new GenericConsumeTask(context, mService, sku, receipt, signature, token) {
                            @Override
                            public void onSuccess(String sku, String receipt, String signature, String token) {
                                ReleaseAllConsumablesTask.this.success(sku, receipt, signature, token);
                            }
                        }.execute();
                    } catch (JSONException e) {
                        // Previously swallowed silently; a malformed receipt is
                        // skipped, but now leaves a trace for debugging.
                        Log.w("godot", "Skipping unparsable purchase receipt: " + e.getMessage());
                    }
                }
            }
        } catch (Exception e) {
            // Best-effort: keep the original swallow-and-log behavior.
            // TODO(review): error(...) is declared but never invoked here —
            // consider reporting this failure through it.
            Log.d("godot", "Error releasing products:" + e.getClass().getName() + ":" + e.getMessage());
        }
    }

    /** Called once per successfully consumed item. */
    abstract protected void success(String sku, String receipt, String signature, String token);

    /** Error callback for subclasses (currently never invoked by this class). */
    abstract protected void error(String message);

    /** Called when there are no purchases to consume. */
    abstract protected void notRequired();
}
mit
mammothcm/mammoth
src/org/apache/hadoop/mapred/TaskLogServlet.java
10703
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapred;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.QueueManager.QueueACL;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.util.StringUtils;

/**
 * A servlet that is run by the TaskTrackers to provide the task logs via http.
 */
public class TaskLogServlet extends HttpServlet {
  private static final long serialVersionUID = -6615764817774487321L;

  // NOTE(review): logger is deliberately(?) named after TaskLog, not this
  // servlet, so its output groups with TaskLog's — confirm before changing.
  private static final Log LOG = LogFactory.getLog(TaskLog.class);

  /** Returns true if the given log file exists and is readable. */
  private boolean haveTaskLog(TaskAttemptID taskId, boolean isCleanup,
      TaskLog.LogName type) {
    File f = TaskLog.getTaskLogFile(taskId, isCleanup, type);
    return f.canRead();
  }

  /**
   * Construct the taskLogUrl
   * @param taskTrackerHostName
   * @param httpPort
   * @param taskAttemptID
   * @return the taskLogUrl
   */
  public static String getTaskLogUrl(String taskTrackerHostName,
      String httpPort, String taskAttemptID) {
    return ("http://" + taskTrackerHostName + ":" + httpPort
        + "/tasklog?attemptid=" + taskAttemptID);
  }

  /**
   * Streams one task log file (byte range [start, end)) to the response,
   * HTML-escaped unless plainText is set.
   *
   * A missing DEBUGOUT log is tolerated silently; any other read failure
   * is logged and reported to the client as 410 GONE.
   */
  private void printTaskLog(HttpServletResponse response,
                            OutputStream out, TaskAttemptID taskId,
                            long start, long end, boolean plainText,
                            TaskLog.LogName filter, boolean isCleanup)
  throws IOException {
    if (!plainText) {
      out.write(("<br><b><u>" + filter + " logs</u></b><br>\n"
                 + "<pre>\n").getBytes());
    }

    try {
      InputStream taskLogReader =
          new TaskLog.Reader(taskId, filter, start, end, isCleanup);
      try {
        byte[] b = new byte[65536];
        int result;
        while (true) {
          result = taskLogReader.read(b);
          if (result > 0) {
            if (plainText) {
              out.write(b, 0, result);
            } else {
              HtmlQuoting.quoteHtmlChars(out, b, 0, result);
            }
          } else {
            break;
          }
        }
      } finally {
        // FIX: previously the reader leaked when read() threw; always close.
        taskLogReader.close();
      }
      if (!plainText) {
        out.write("</pre></td></tr></table><hr><br>\n".getBytes());
      }
    } catch (IOException ioe) {
      if (filter == TaskLog.LogName.DEBUGOUT) {
        if (!plainText) {
          out.write("</pre><hr><br>\n".getBytes());
        }
        // do nothing
      } else {
        String msg = "Failed to retrieve " + filter + " log for task: "
            + taskId;
        LOG.warn(msg, ioe);
        response.sendError(HttpServletResponse.SC_GONE, msg);
      }
    }
  }

  /**
   * Validates if the given user has job view permissions for this job.
   * conf contains jobOwner and job-view-ACLs.
   * We allow jobOwner, superUser(i.e. mrOwner) and cluster administrators and
   * users and groups specified in configuration using
   * mapreduce.job.acl-view-job to view job.
   */
  private void checkAccessForTaskLogs(JobConf conf, String user, String jobId,
      TaskTracker tracker) throws AccessControlException {

    if (!tracker.areACLsEnabled()) {
      return;
    }

    // build job view ACL by reading from conf
    AccessControlList jobViewACL = tracker.getJobACLsManager().
        constructJobACLs(conf).get(JobACL.VIEW_JOB);

    // read job queue name from conf
    String queue = conf.getQueueName();

    // build queue admins ACL by reading from conf
    AccessControlList queueAdminsACL = new AccessControlList(
        conf.get(QueueManager.toFullPropertyName(queue,
            QueueACL.ADMINISTER_JOBS.getAclName()), " "));

    String jobOwner = conf.get("user.name");
    UserGroupInformation callerUGI =
        UserGroupInformation.createRemoteUser(user);

    // check if user is queue admin or cluster admin or jobOwner or member of
    // job-view-acl
    if (!queueAdminsACL.isUserAllowed(callerUGI)) {
      tracker.getACLsManager().checkAccess(jobId, callerUGI, queue,
          Operation.VIEW_TASK_LOGS, jobOwner, jobViewACL);
    }
  }

  /**
   * Builds a JobConf object by reading the job-acls.xml file.
   * This doesn't load the default resources.
   *
   * Returns null if job-acls.xml is not there in userlogs/$jobid on
   * local file system. This can happen when we restart the cluster with job
   * level authorization enabled(but was disabled on earlier cluster) and
   * viewing task logs of old jobs(i.e. jobs finished on earlier unsecure
   * cluster).
   */
  static JobConf getConfFromJobACLsFile(JobID jobId) {
    Path jobAclsFilePath = new Path(
        TaskLog.getJobDir(jobId).toString(), TaskTracker.jobACLsFile);
    JobConf conf = null;
    if (new File(jobAclsFilePath.toUri().getPath()).exists()) {
      conf = new JobConf(false);
      conf.addResource(jobAclsFilePath);
    }
    return conf;
  }

  /**
   * Get the logs via http.
   */
  @Override
  public void doGet(HttpServletRequest request,
                    HttpServletResponse response
                    ) throws ServletException, IOException {
    long start = 0;
    long end = -1;
    boolean plainText = false;
    TaskLog.LogName filter = null;
    boolean isCleanup = false;

    String attemptIdStr = request.getParameter("attemptid");
    if (attemptIdStr == null) {
      response.sendError(HttpServletResponse.SC_BAD_REQUEST,
                         "Argument attemptid is required");
      return;
    }

    String logFilter = request.getParameter("filter");
    if (logFilter != null) {
      try {
        filter = TaskLog.LogName.valueOf(TaskLog.LogName.class,
                                         logFilter.toUpperCase());
      } catch (IllegalArgumentException iae) {
        response.sendError(HttpServletResponse.SC_BAD_REQUEST,
                           "Illegal value for filter: " + logFilter);
        return;
      }
    }

    String sLogOff = request.getParameter("start");
    if (sLogOff != null) {
      start = Long.valueOf(sLogOff).longValue();
    }

    String sLogEnd = request.getParameter("end");
    if (sLogEnd != null) {
      end = Long.valueOf(sLogEnd).longValue();
    }

    String sPlainText = request.getParameter("plaintext");
    if (sPlainText != null) {
      plainText = Boolean.valueOf(sPlainText);
    }

    String sCleanup = request.getParameter("cleanup");
    if (sCleanup != null) {
      isCleanup = Boolean.valueOf(sCleanup);
    }

    TaskAttemptID attemptId = TaskAttemptID.forName(attemptIdStr);
    if (!TaskLog.getAttemptDir(attemptId, isCleanup).exists()) {
      response.sendError(HttpServletResponse.SC_GONE,
          "Task log directory for task " + attemptId +
          " does not exist. May be cleaned up by Task Tracker, if older logs.");
      return;
    }

    // get user name who is accessing
    String user = request.getRemoteUser();
    if (user != null) {
      ServletContext context = getServletContext();
      TaskTracker taskTracker = (TaskTracker) context.getAttribute(
          "task.tracker");
      JobID jobId = attemptId.getJobID();

      // get jobACLConf from ACLs file
      JobConf jobACLConf = getConfFromJobACLsFile(jobId);
      // Ignore authorization if job-acls.xml is not found
      if (jobACLConf != null) {
        try {
          checkAccessForTaskLogs(jobACLConf, user, jobId.toString(),
              taskTracker);
        } catch (AccessControlException e) {
          String errMsg = "User " + user + " failed to view tasklogs of job "
              + jobId + "!\n\n" + e.getMessage();
          response.sendError(HttpServletResponse.SC_UNAUTHORIZED, errMsg);
          return;
        }
      }
    }

    OutputStream out = response.getOutputStream();
    if (!plainText) {
      out.write(("<html>\n" +
                 "<title>Task Logs: '" + attemptId + "'</title>\n" +
                 "<body>\n" +
                 "<h1>Task Logs: '" + attemptId + "'</h1><br>\n").getBytes());

      if (filter == null) {
        printTaskLog(response, out, attemptId, start, end, plainText,
                     TaskLog.LogName.STDOUT, isCleanup);
        printTaskLog(response, out, attemptId, start, end, plainText,
                     TaskLog.LogName.STDERR, isCleanup);
        if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.SYSLOG)) {
          printTaskLog(response, out, attemptId, start, end, plainText,
                       TaskLog.LogName.SYSLOG, isCleanup);
        }
        if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.DEBUGOUT)) {
          printTaskLog(response, out, attemptId, start, end, plainText,
                       TaskLog.LogName.DEBUGOUT, isCleanup);
        }
        if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.PROFILE)) {
          printTaskLog(response, out, attemptId, start, end, plainText,
                       TaskLog.LogName.PROFILE, isCleanup);
        }
      } else {
        printTaskLog(response, out, attemptId, start, end, plainText,
                     filter, isCleanup);
      }
      out.write("</body></html>\n".getBytes());
      out.close();
    } else if (filter == null) {
      response.sendError(HttpServletResponse.SC_BAD_REQUEST,
          "You must supply a value for `filter' (STDOUT, STDERR, or SYSLOG) if you set plainText = true");
    } else {
      printTaskLog(response, out, attemptId, start, end, plainText,
                   filter, isCleanup);
    }
  }
}
apache-2.0
NhlalukoG/android_samsung_j7e3g
vendor/samsung/preloads/UniversalMDMClient/rhino1_7R4/src/org/mozilla/javascript/ast/ParseProblem.java
2056
/* -*- Mode: java; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

package org.mozilla.javascript.ast;

/**
 * Encapsulates information for a JavaScript parse error or warning.
 */
public class ParseProblem {

    /** Severity of the problem: hard error or mere warning. */
    public static enum Type {Error, Warning}

    private Type type;
    private String message;
    private String sourceName;
    private int offset;
    private int length;

    /**
     * Constructs a new ParseProblem.
     *
     * @param type whether this is an error or a warning
     * @param message human-readable description
     * @param sourceName name of the source the problem occurred in
     * @param offset character offset of the problem within the source
     * @param length length in characters of the problematic region
     */
    public ParseProblem(ParseProblem.Type type, String message,
                        String sourceName, int offset, int length) {
        // Route through the setters (kept for subclass compatibility).
        setType(type);
        setMessage(message);
        setSourceName(sourceName);
        setFileOffset(offset);
        setLength(length);
    }

    /** Returns whether this is an error or a warning. */
    public ParseProblem.Type getType() {
        return type;
    }

    public void setType(ParseProblem.Type type) {
        this.type = type;
    }

    /** Returns the human-readable problem description. */
    public String getMessage() {
        return message;
    }

    public void setMessage(String msg) {
        this.message = msg;
    }

    /** Returns the name of the source containing the problem. */
    public String getSourceName() {
        return sourceName;
    }

    public void setSourceName(String name) {
        this.sourceName = name;
    }

    /** Returns the character offset of the problem. */
    public int getFileOffset() {
        return offset;
    }

    public void setFileOffset(int offset) {
        this.offset = offset;
    }

    /** Returns the length of the problematic region. */
    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    @Override
    public String toString() {
        // Same rendering as before, expressed as plain concatenation:
        // "<source>:offset=<o>,length=<l>,error: <msg>" (or "warning: ").
        String severity = (type == Type.Error) ? "error: " : "warning: ";
        return sourceName + ":"
                + "offset=" + offset + ","
                + "length=" + length + ","
                + severity + message;
    }
}
gpl-2.0
kuzemchik/presto
presto-main/src/main/java/com/facebook/presto/metadata/FunctionFactory.java
709
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.metadata;

import java.util.List;

/**
 * A provider of SQL functions.
 *
 * Implementations are queried for the set of {@link ParametricFunction}s
 * they contribute; how and when the result is registered is up to the
 * caller (the function registry).
 */
public interface FunctionFactory
{
    /** Returns the functions contributed by this factory. */
    List<ParametricFunction> listFunctions();
}
apache-2.0
lemonJun/Jkafka
jkafka-core/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java
2769
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p/>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p/>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.security.auth;

import java.security.Principal;

/**
 * An immutable principal identifying a Kafka client, composed of a principal
 * type and a name (rendered as {@code type:name}).
 */
public class KafkaPrincipal implements Principal {
    public static final String SEPARATOR = ":";
    public static final String USER_TYPE = "User";
    public final static KafkaPrincipal ANONYMOUS = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ANONYMOUS");

    // Immutable once constructed; made final to enforce that.
    private final String principalType;
    private final String name;

    /**
     * @param principalType the principal category, e.g. {@link #USER_TYPE}
     * @param name the principal's name
     * @throws IllegalArgumentException if either argument is null
     */
    public KafkaPrincipal(String principalType, String name) {
        if (principalType == null || name == null) {
            throw new IllegalArgumentException("principalType and name can not be null");
        }
        this.principalType = principalType;
        this.name = name;
    }

    /**
     * Parses a {@code principalType:principalName} string.
     *
     * @throws IllegalArgumentException if the input is null, empty, or lacks
     *         the {@code :} separator
     */
    public static KafkaPrincipal fromString(String str) {
        if (str == null || str.isEmpty()) {
            throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str);
        }
        // Split on the first separator only, so names may themselves contain ':'.
        String[] split = str.split(SEPARATOR, 2);
        // FIX: dropped the dead `split == null` test — String.split never
        // returns null, only the length check is meaningful.
        if (split.length != 2) {
            throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str);
        }
        return new KafkaPrincipal(split[0], split[1]);
    }

    @Override
    public String toString() {
        return principalType + SEPARATOR + name;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof KafkaPrincipal))
            return false;
        KafkaPrincipal that = (KafkaPrincipal) o;
        if (!principalType.equals(that.principalType))
            return false;
        return name.equals(that.name);
    }

    @Override
    public int hashCode() {
        int result = principalType.hashCode();
        result = 31 * result + name.hashCode();
        return result;
    }

    @Override
    public String getName() {
        return name;
    }

    public String getPrincipalType() {
        return principalType;
    }
}
apache-2.0
kristinehahn/drill
exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/UInt8ConvertTo.java
2050
/*******************************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package org.apache.drill.exec.expr.fn.impl.conv;

import io.netty.buffer.DrillBuf;

import javax.inject.Inject;

import org.apache.drill.exec.expr.DrillSimpleFunc;
import org.apache.drill.exec.expr.annotations.FunctionTemplate;
import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
import org.apache.drill.exec.expr.annotations.Output;
import org.apache.drill.exec.expr.annotations.Param;
import org.apache.drill.exec.expr.holders.UInt8Holder;
import org.apache.drill.exec.expr.holders.VarBinaryHolder;

/**
 * SQL function {@code convert_toUINT8}: serializes an unsigned-8-byte value
 * into an 8-byte VARBINARY.
 *
 * NOTE: Drill's code generator inlines the setup()/eval() bodies verbatim,
 * so they must remain self-contained expressions over the annotated fields.
 */
@FunctionTemplate(name = "convert_toUINT8", scope = FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
public class UInt8ConvertTo implements DrillSimpleFunc {

  @Param UInt8Holder in;            // input value to serialize
  @Output VarBinaryHolder out;      // receives the 8-byte binary result
  @Inject DrillBuf buffer;          // scratch buffer injected by the runtime

  @Override
  public void setup() {
    // Ensure the scratch buffer can hold the fixed 8-byte output.
    buffer = buffer.reallocIfNeeded(8);
  }

  @Override
  public void eval() {
    buffer.clear();
    // writeLong emits all 8 bytes of the value (presumably little-endian,
    // matching Drill's convert_from counterpart — confirm against that impl).
    buffer.writeLong(in.value);
    out.buffer = buffer;
    out.start = 0;
    out.end = 8;
  }
}
apache-2.0
rokn/Count_Words_2015
testing/openjdk2/langtools/test/tools/javac/annotations/typeAnnotations/failures/common/innertypeparams/InvalidLocation.java
386
/*
 * @test /nodynamiccopyright/
 * @bug 6843077 8006775
 * @summary check for invalid annotations given the target
 * @author Mahmood Ali
 * @compile/fail/ref=InvalidLocation.out -XDrawDiagnostics InvalidLocation.java
 */

class InvalidLocation {
  void innermethod() {
    class Inner<@A K> {} // expected diagnostic: @A targets TYPE, not TYPE_PARAMETER
  }
}
// NOTE: InvalidLocation.out pins diagnostic line/column positions, so no
// lines may be added or removed above the `class Inner<@A K>` line.

@java.lang.annotation.Target(java.lang.annotation.ElementType.TYPE)
@interface A {
}
mit
medicayun/medicayundicom
dcm4che14/tags/DCM4JBOSS_2_7_6/samples/java/MediaCreationMgtScu.java
20086
/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is part of dcm4che, an implementation of DICOM(TM) in * Java(TM), hosted at http://sourceforge.net/projects/dcm4che. * * The Initial Developer of the Original Code is * TIANI Medgraph AG. * Portions created by the Initial Developer are Copyright (C) 2002-2005 * the Initial Developer. All Rights Reserved. * * Contributor(s): * Gunter Zeilinger <gunter.zeilinger@tiani.com> * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. 
 *
 * ***** END LICENSE BLOCK ***** */

import gnu.getopt.Getopt;
import gnu.getopt.LongOpt;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.ResourceBundle;
import java.util.Map.Entry;

import org.apache.log4j.Logger;
import org.dcm4che.data.Command;
import org.dcm4che.data.Dataset;
import org.dcm4che.data.DcmElement;
import org.dcm4che.data.DcmObjectFactory;
import org.dcm4che.dict.Status;
import org.dcm4che.dict.Tags;
import org.dcm4che.dict.UIDs;
import org.dcm4che.net.AAssociateAC;
import org.dcm4che.net.AAssociateRQ;
import org.dcm4che.net.ActiveAssociation;
import org.dcm4che.net.Association;
import org.dcm4che.net.AssociationFactory;
import org.dcm4che.net.Dimse;
import org.dcm4che.net.FutureRSP;
import org.dcm4che.net.PDU;
import org.dcm4che.util.DcmURL;
import org.dcm4che.util.SSLContextAdapter;

/******************************************
 *                                        *
 *  dcm4che: A OpenSource DICOM Toolkit   *
 *                                        *
 *  Distributable under LGPL license.     *
 *  See terms of license at gnu.org.      *
 *                                        *
 ******************************************/

/**
 * Command-line SCU for the DICOM Media Creation Management SOP Class.
 * Depending on the options parsed in {@link #main}, it performs C-ECHO,
 * N-CREATE (create a media creation request from DICOM files), N-ACTION
 * (initiate or cancel media creation) and/or N-GET (query request status)
 * over a single association.
 *
 * @author gunter.zeilinter@tiani.com
 * @version $Revision: 3922 $ $Date: 2005-10-06 00:26:16 +0800 (周四, 06 10月 2005) $
 * @since 18.06.2004
 *
 */
public class MediaCreationMgtScu {

    private static final String FALSE = "false";

    private static final String TRUE = "true";

    // fallback transfer syntax offered for the Verification presentation context
    private static final String[] DEF_TS = { UIDs.ImplicitVRLittleEndian};

    // presentation context IDs (must be odd per the DICOM Upper Layer protocol)
    private static final int PCID_ECHO = 1;

    private static final int PCID_MCM = 3;

    // bit flags combined into the local 'cmd' variable in main()
    private static final int ECHO = 0;

    private static final int CREATE = 1;

    private static final int SCHEDULE = 2;

    private static final int CANCEL = 4;

    private static final int GET = 8;

    // N-ACTION type IDs defined by the Media Creation Management SOP Class
    private static final int INITIATE_MEDIA_CREATION = 1;

    private static final int CANCEL_MEDIA_CREATION = 2;

    private static final Logger log = Logger.getLogger("MediaCreationMgtScu");

    // localized usage/error texts, loaded from MediaCreationMgtScu.properties
    private static final ResourceBundle messages = ResourceBundle.getBundle(
            "MediaCreationMgtScu", Locale.getDefault());

    // long options; val==2 means "store under the long-option name in the config"
    private static final LongOpt[] LONG_OPTS = new LongOpt[] {
            new LongOpt("profile", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("create", LongOpt.REQUIRED_ARGUMENT, null, 'C'),
            new LongOpt("action", LongOpt.REQUIRED_ARGUMENT, null, 'A'),
            new LongOpt("get", LongOpt.REQUIRED_ARGUMENT, null, 'G'),
            new LongOpt("ac-timeout", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("dimse-timeout", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("so-close-delay", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("max-pdu-len", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("pack-pdvs", LongOpt.NO_ARGUMENT, null, 'k'),
            new LongOpt("tls-key", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("tls-key-passwd", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("tls-cacerts", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("tls-cacerts-passwd", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("ts", LongOpt.REQUIRED_ARGUMENT, null, 2),
            new LongOpt("help", LongOpt.NO_ARGUMENT, null, 'h'),
            new LongOpt("version", LongOpt.NO_ARGUMENT, null, 'v'),};

    static final AssociationFactory aFact = AssociationFactory.getInstance();

    static final DcmObjectFactory oFact = DcmObjectFactory.getInstance();

    // NOTE(review): this field appears unused - main() keeps its own local 'cmd'
    private int cmd = ECHO;

    private DcmURL url = null;

    // attributes merged into the N-CREATE dataset (from "create.*" config entries)
    private Dataset createAttrs = oFact.newDataset();

    // attributes sent with the INITIATE N-ACTION (from "action.*" config entries)
    private Dataset actionAttrs = oFact.newDataset();

    // attribute tags requested by N-GET (from "get.*" config entries); null = all
    private int[] getAttrs;

    private int acTimeout = 5000;

    private int dimseTimeout = 0;

    private int soCloseDelay = 500;

    private AAssociateRQ assocRQ = aFact.newAAssociateRQ();

    private ActiveAssociation assoc = null;

    private boolean packPDVs = false;

    private SSLContextAdapter tls = null;

    private String[] cipherSuites = null;

    // NOTE(review): appears unused; 'assoc' holds the active association
    private ActiveAssociation activeAssociation = null;

    // Requested Media Application Profile put into each Ref SOP item, may be null
    private String profile;

    /**
     * Parses options, builds the SCU and runs the requested operations
     * (echo / create / initiate / cancel / get) over one association.
     */
    public static void main(String args[]) throws Exception {
        Getopt g = new Getopt("mcmscu", args, "caxgu:hv", LONG_OPTS);
        Configuration cfg = new Configuration(MediaCreationMgtScu.class
                .getResource("mcmscu.cfg"));
        int cmd = ECHO;
        String iuid = null;
        int c;
        while ((c = g.getopt()) != -1) {
            switch (c) {
            case 'c':
            case 'a':
            case 'x':
            case 'g':
                // accumulate operation flags; several may be combined
                cmd |= c == 'c' ? CREATE : c == 'a' ? SCHEDULE : c == 'x' ?
                        CANCEL : GET;
                break;
            case 'u':
                iuid = g.getOptarg();
                break;
            case 2:
                // generic long option: store under its own name
                cfg.put(LONG_OPTS[g.getLongind()].getName(), g.getOptarg());
                break;
            case 'k':
                cfg.put("pack-pdvs", TRUE);
                break;
            case 'C':
                set(cfg, g.getOptarg(), "create.");
                break;
            case 'A':
                set(cfg, g.getOptarg(), "action.");
                break;
            case 'G':
                set(cfg, g.getOptarg(), "get.");
                break;
            case 'v':
                exit(messages.getString("version"), false);
                break;
            case 'h':
                exit(messages.getString("usage"), false);
                break;
            case '?':
                exit(null, true);
                break;
            }
        }
        int optind = g.getOptind();
        int argc = args.length - optind;
        if (argc == 0)
                exit(messages.getString("missing-url"), true);
        // an instance UID is required unless we create one (N-CREATE may assign it)
        if (iuid == null && cmd != ECHO && (cmd & CREATE) == 0)
                exit(messages.getString("missing-iuid"), true);
        //      listConfig(cfg);
        try {
            MediaCreationMgtScu scu = new MediaCreationMgtScu(cfg, new DcmURL(
                    args[optind]));
            Dataset createAttrs = null;
            if ((cmd & CREATE) != 0)
                    createAttrs = scu.makeCreateAttrs(args, optind + 1);
            scu.openAssoc();
            try {
                if (cmd == ECHO)
                    scu.echo();
                else {
                    if (createAttrs != null)
                            iuid = scu.create(iuid, createAttrs);
                    // iuid may be null if N-CREATE failed; skip follow-ups then
                    if (iuid != null) {
                        if ((cmd & SCHEDULE) != 0)
                                scu.initiate(iuid);
                        if ((cmd & CANCEL) != 0)
                                scu.cancel(iuid);
                        if ((cmd & GET) != 0)
                                scu.get(iuid);
                    }
                }
            } finally {
                scu.releaseAssoc();
            }
        } catch (IllegalArgumentException e) {
            exit(e.getMessage(), true);
        }
    }

    /**
     * Builds the N-CREATE dataset: the configured create attributes plus a
     * Referenced SOP Sequence with one item per DICOM object found in the
     * given files/directories (remaining command line arguments).
     */
    public Dataset makeCreateAttrs(String[] args, int off) {
        Dataset ds = oFact.newDataset();
        ds.putAll(createAttrs);
        DcmElement refSOPSeq = ds.putSQ(Tags.RefSOPSeq);
        for (int i = off; i < args.length; ++i) {
            addRefSOPItem(refSOPSeq, new File(args[i]));
        }
        return ds;
    }

    /**
     * Reads the file (recursing into directories) up to - but excluding -
     * Pixel Data, and appends a Ref SOP item with its class/instance UIDs.
     * Unreadable files or files without both UIDs are logged and skipped.
     */
    private void addRefSOPItem(DcmElement refSOPSeq, File file) {
        if (file.isDirectory()) {
            File[] files = file.listFiles();
            for (int i = 0; i < files.length; i++)
                addRefSOPItem(refSOPSeq, files[i]);
            return;
        }
        InputStream in = null;
        try {
            log.info("M-READ " + file);
            in = new BufferedInputStream(new FileInputStream(file));
            Dataset ds = oFact.newDataset();
            // stop parsing at Pixel Data - only the UIDs are needed
            ds.readFile(in, null, Tags.PixelData);
            final String iuid = ds.getString(Tags.SOPInstanceUID);
            final String cuid = ds.getString(Tags.SOPClassUID);
            if (iuid != null && cuid != null) {
                Dataset item = refSOPSeq.addNewItem();
                item.putUI(Tags.RefSOPInstanceUID, iuid);
                item.putUI(Tags.RefSOPClassUID, cuid);
                if (profile != null)
                        item.putLO(Tags.RequestedMediaApplicationProfile,
                                profile);
            } else {
                log.warn("Missing CUID and/or IUID in DICOM object read from "
                        + file);
            }
        } catch (IOException e) {
            log.warn("M-READ " + file + " failed:", e);
        } finally {
            if (in != null)
                    try {
                        in.close();
                    } catch (IOException ignore) {
                    }
        }
    }

    /**
     * Stores an option of the form "name:value" (or just "name", stored with
     * an empty value) in the configuration under the given key prefix.
     */
    private static void set(Configuration cfg, String s, String prefix) {
        int pos = s.indexOf(':');
        if (pos == -1) {
            cfg.put(prefix + s, "");
        } else {
            cfg.put(prefix + s.substring(0, pos), s.substring(pos + 1));
        }
    }

    /**
     * Prints the prompt (if any) and terminates the JVM.
     * NOTE(review): exit status is 1 even when error == false (help/version).
     */
    private static void exit(String prompt, boolean error) {
        if (prompt != null) System.err.println(prompt);
        if (error) System.err.println(messages.getString("try"));
        System.exit(1);
    }

    /**
     * Creates the SCU from configuration and target URL: reads timeouts,
     * prepares the A-ASSOCIATE-RQ, TLS context and attribute datasets.
     */
    public MediaCreationMgtScu(Configuration cfg, DcmURL url) {
        this.url = url;
        packPDVs = TRUE.equalsIgnoreCase(cfg.getProperty("pack-pdvs", FALSE));
        acTimeout = Integer.parseInt(cfg.getProperty("ac-timeout", "5000"));
        dimseTimeout = Integer.parseInt(cfg.getProperty("dimse-timeout", "0"));
        soCloseDelay = Integer.parseInt(cfg
                .getProperty("so-close-delay", "500"));
        initAssocRQ(cfg);
        initTLS(cfg);
        initAttrs(cfg);
        profile = cfg.getProperty("profile");
    }

    /** Fills the A-ASSOCIATE-RQ: AE titles, max PDU length, presentation contexts. */
    private void initAssocRQ(Configuration cfg) {
        assocRQ.setCalledAET(url.getCalledAET());
        String calling = url.getCallingAET();
        if (calling == null) calling = "MCMSCU";
        assocRQ.setCallingAET(calling);
        assocRQ.setMaxPDULength(Integer.parseInt(cfg.getProperty("max-pdu-len",
                "16352")));
        packPDVs = TRUE.equalsIgnoreCase(cfg.getProperty("pack-pdvs", FALSE));
        assocRQ.addPresContext(aFact.newPresContext(PCID_ECHO,
                UIDs.Verification, DEF_TS));
        assocRQ.addPresContext(aFact.newPresContext(PCID_MCM,
                UIDs.MediaCreationManagementSOPClass,
                getTransferSyntaxUIDs(cfg)));
    }

    /** Resolves the configured transfer syntax names ("ts" property) to UIDs. */
    private String[] getTransferSyntaxUIDs(Configuration cfg) {
        List tsNames = cfg.tokenize(cfg.getProperty("ts"), new LinkedList());
        String[] tsUIDs = new String[tsNames.size()];
        for (int i = 0; i < tsUIDs.length; ++i)
            tsUIDs[i] = UIDs.forName((String) tsNames.get(i));
        return tsUIDs;
    }

    /**
     * Initializes TLS if the URL requests cipher suites; otherwise a no-op.
     * Key and trust stores are loaded from the classpath next to DcmSnd.
     */
    private void initTLS(Configuration cfg) {
        try {
            cipherSuites = url.getCipherSuites();
            if (cipherSuites == null) { return; }
            tls = SSLContextAdapter.getInstance();
            char[] keypasswd = cfg.getProperty("tls-key-passwd", "secret")
                    .toCharArray();
            tls.setKey(tls.loadKeyStore(DcmSnd.class.getResource(cfg
                    .getProperty("tls-key", "identity.p12")), keypasswd),
                    keypasswd);
            tls.setTrust(tls.loadKeyStore(DcmSnd.class.getResource(cfg
                    .getProperty("tls-cacerts", "cacerts.jks")), cfg
                    .getProperty("tls-cacerts-passwd", "secret")
                    .toCharArray()));
            tls.init();
        } catch (Exception ex) {
            throw new RuntimeException(
                    "Could not initalize TLS configuration: ", ex);
        }
    }

    /**
     * Distributes config entries prefixed "create.", "action." and "get."
     * into the create dataset, the action dataset and the N-GET tag list.
     *
     * @throws IllegalArgumentException on an unresolvable tag name or value
     */
    private void initAttrs(Configuration cfg) {
        List list = new ArrayList();
        for (Iterator it = cfg.entrySet().iterator(); it.hasNext();) {
            Map.Entry entry = (Entry) it.next();
            String key = (String) entry.getKey();
            String value = (String) entry.getValue();
            try {
                if (key.startsWith("create.")) {
                    createAttrs.putXX(Tags.forName(key.substring("create."
                            .length())), value);
                } else if (key.startsWith("action.")) {
                    actionAttrs.putXX(Tags.forName(key.substring("action."
                            .length())), value);
                } else if (key.startsWith("get.")) {
                    list.add(new Integer(Tags.forName(key.substring("get."
                            .length()))));
                }
            } catch (Exception e) {
                throw new IllegalArgumentException(
                        "Illegal entry in mcmscu.cfg - " + key + "=" + value);
            }
        }
        if (!list.isEmpty()) {
            getAttrs = new int[list.size()];
            for (int i = 0; i < getAttrs.length; i++)
                getAttrs[i] = ((Integer) list.get(i)).intValue();
        }
    }

    /** Opens a plain or TLS socket, depending on whether cipher suites are set. */
    private Socket newSocket(String host, int port) throws IOException,
            GeneralSecurityException {
        if (cipherSuites != null) {
            return tls.getSocketFactory(cipherSuites).createSocket(host, port);
        } else {
            return new Socket(host, port);
        }
    }

    /**
     * Connects to the remote AE and negotiates the association.
     *
     * @throws IOException if the association is rejected or the connect fails
     */
    public void openAssoc() throws IOException, GeneralSecurityException {
        Association a = aFact.newRequestor(newSocket(url.getHost(), url
                .getPort()));
        a.setAcTimeout(acTimeout);
        a.setDimseTimeout(dimseTimeout);
        a.setSoCloseDelay(soCloseDelay);
        a.setPackPDVs(packPDVs);
        PDU assocAC = a.connect(assocRQ);
        if (!(assocAC instanceof AAssociateAC))
                throw new IOException("Association rejected");
        assoc = aFact.newActiveAssociation(a, null);
        assoc.start();
    }

    /** Gracefully releases the association; always clears {@link #assoc}. */
    public void releaseAssoc() throws InterruptedException, IOException {
        checkAssoc();
        try {
            assoc.release(true);
        } finally {
            assoc = null;
        }
    }

    /** @throws IllegalStateException if no association is open */
    private void checkAssoc() {
        if (assoc == null)
                throw new IllegalStateException("No open association");
    }

    /**
     * Returns true if the presentation context was accepted; otherwise logs
     * the localized message looked up under msgid and returns false.
     */
    private boolean checkPC(int pcid, String msgid) {
        if (assoc.getAssociation().getAcceptedTransferSyntaxUID(pcid) != null)
                return true;
        log.error(messages.getString(msgid));
        return false;
    }

    /** Sends a C-ECHO-RQ on the Verification presentation context. */
    public void echo() throws InterruptedException, IOException,
            GeneralSecurityException {
        checkAssoc();
        if (checkPC(PCID_ECHO, "noPCEcho"))
                assoc.invoke(aFact.newDimse(PCID_ECHO, oFact.newCommand()
                        .initCEchoRQ(1)), null);
    }

    /**
     * Sends the N-CREATE-RQ and waits for the response.
     *
     * @param iuid requested SOP Instance UID, or null to let the SCP assign one
     * @param ds   create attributes including the Ref SOP Sequence
     * @return the (possibly SCP-assigned) instance UID, or null on failure
     */
    public String create(String iuid, Dataset ds) throws InterruptedException,
            IOException {
        checkAssoc();
        if (!checkPC(PCID_MCM, "noPCMcm")) return null;
        FutureRSP futureRsp = assoc.invoke(aFact.newDimse(PCID_MCM, oFact
                .newCommand().initNCreateRQ(1,
                        UIDs.MediaCreationManagementSOPClass, iuid), ds));
        Dimse rsp = futureRsp.get();
        Command cmdRsp = rsp.getCommand();
        Dataset dataRsp = rsp.getDataset();
        int status = cmdRsp.getStatus();
        switch (status) {
        case Status.AttributeValueOutOfRange:
            log.warn("Warning: Attribute Value Out Of Range: "
                    + cmdRsp.getString(Tags.ErrorComment, "") + dataRsp);
            // intentional fall-through: the warning status is still a success
        case Status.Success:
            return iuid != null ? iuid : cmdRsp.getAffectedSOPInstanceUID();
        }
        log.error("Failure Status " + Integer.toHexString(status) + ": "
                + cmdRsp.getString(Tags.ErrorComment, "") + dataRsp);
        return null;
    }

    /**
     * Sends an N-ACTION-RQ with the given action type and waits for the
     * response; a non-zero status is only logged, not thrown.
     */
    private void action(int msgid, String iuid, int actionid, Dataset attrs)
            throws InterruptedException, IOException {
        if (!checkPC(PCID_MCM, "noPCMcm")) return;
        FutureRSP futureRsp = assoc.invoke(aFact.newDimse(PCID_MCM, oFact
                .newCommand().initNActionRQ(msgid,
                        UIDs.MediaCreationManagementSOPClass, iuid, actionid),
                attrs));
        Dimse rsp = futureRsp.get();
        Command cmdRsp = rsp.getCommand();
        Dataset dataRsp = rsp.getDataset();
        int status = cmdRsp.getStatus();
        if (status != 0)
                log.error("Failure Status " + Integer.toHexString(status)
                        + ": " + cmdRsp.getString(Tags.ErrorComment, "")
                        + dataRsp);
    }

    /** Schedules media creation for the given request instance (N-ACTION type 1). */
    public void initiate(String iuid) throws InterruptedException, IOException {
        log.info("Initiate Media Creation Request[iuid:" + iuid + "]\n:"
                + actionAttrs);
        action(3, iuid, INITIATE_MEDIA_CREATION, actionAttrs);
    }

    /** Cancels the media creation request (N-ACTION type 2, no attributes). */
    public void cancel(String iuid) throws InterruptedException, IOException {
        log.info("Canceling Media Creation Request[iuid:" + iuid + "]");
        action(5, iuid, CANCEL_MEDIA_CREATION, null);
    }

    /**
     * Sends an N-GET-RQ for the configured attribute tags and logs either the
     * received attributes or the failure status.
     */
    public void get(String iuid) throws InterruptedException, IOException {
        checkAssoc();
        if (!checkPC(PCID_MCM, "noPCMcm")) return;
        FutureRSP futureRsp = assoc.invoke(aFact.newDimse(PCID_MCM, oFact
                .newCommand().initNGetRQ(7,
                        UIDs.MediaCreationManagementSOPClass, iuid, getAttrs)));
        Dimse rsp = futureRsp.get();
        Command cmdRsp = rsp.getCommand();
        Dataset dataRsp = rsp.getDataset();
        int status = cmdRsp.getStatus();
        if (status != 0)
                log.error("Failure Status " + Integer.toHexString(status)
                        + ": " + cmdRsp.getString(Tags.ErrorComment, "")
                        + (dataRsp == null ? "" : ("\n" + dataRsp)));
        else
                log.info("Received Attributes:\n" + dataRsp);
    }
}
apache-2.0
liveqmock/platform-tools-idea
java/java-impl/src/com/intellij/codeInsight/editorActions/wordSelection/VarargsSelectioner.java
3149
/*
 * Copyright 2000-2012 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.codeInsight.editorActions.wordSelection;

import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.*;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * Word selectioner that selects the run of call arguments bound to a method's
 * vararg parameter, i.e. the trailing arguments of a call whose resolved
 * method declares a vararg last parameter.
 *
 * @author Danila Ponomarenko
 */
public class VarargsSelectioner extends BasicSelectioner {
  @Override
  public boolean canSelect(PsiElement e) {
    return e instanceof PsiExpressionList;
  }

  @Override
  public List<TextRange> select(PsiElement e, CharSequence editorText, int cursorOffset, Editor editor) {
    if (!(e instanceof PsiExpressionList)) {
      return Collections.emptyList();
    }

    final PsiExpressionList argumentList = (PsiExpressionList)e;
    final PsiParameterList parameters = getParameterList(argumentList);
    if (parameters == null) {
      return Collections.emptyList();
    }

    final PsiExpression[] varargs = getVarargArgs(parameters, argumentList);
    if (varargs.length == 0) {
      return Collections.emptyList();
    }

    // One contiguous range from the first to the last vararg argument.
    final int startOffset = varargs[0].getTextRange().getStartOffset();
    final int endOffset = varargs[varargs.length - 1].getTextRange().getEndOffset();
    return Collections.singletonList(new TextRange(startOffset, endOffset));
  }

  /**
   * Returns the arguments bound to the vararg parameter, or an empty array
   * when the last parameter is not a vararg or fewer arguments than declared
   * parameters were passed.
   */
  @NotNull
  private static PsiExpression[] getVarargArgs(@NotNull PsiParameterList parameterList,
                                               @NotNull PsiExpressionList expressionList) {
    final PsiParameter[] params = parameterList.getParameters();
    final PsiExpression[] args = expressionList.getExpressions();
    if (params.length == 0 || args.length == 0) {
      return PsiExpression.EMPTY_ARRAY;
    }

    // The vararg parameter, if any, is always the declared last one.
    final int lastParamIndex = params.length - 1;
    final boolean varargCall = params[lastParamIndex].isVarArgs() && params.length <= args.length;
    return varargCall ? Arrays.copyOfRange(args, lastParamIndex, args.length)
                      : PsiExpression.EMPTY_ARRAY;
  }

  /**
   * Resolves the called method of the argument list's parent call expression
   * and returns its parameter list, or null when there is no resolvable call.
   */
  @Nullable
  private static PsiParameterList getParameterList(@NotNull PsiExpressionList list) {
    final PsiElement parent = list.getParent();
    if (!(parent instanceof PsiMethodCallExpression)) {
      return null;
    }
    final PsiMethod method = ((PsiMethodCallExpression)parent).resolveMethod();
    if (method == null) {
      return null;
    }
    return method.getParameterList();
  }
}
apache-2.0
jmnarloch/spring-boot
spring-boot-samples/spring-boot-sample-actuator/src/main/java/sample/actuator/ExampleInfoContributor.java
1067
/*
 * Copyright 2012-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package sample.actuator;

import java.util.Collections;

import org.springframework.boot.actuate.info.Info;
import org.springframework.boot.actuate.info.InfoContributor;
import org.springframework.stereotype.Component;

/**
 * Sample {@link InfoContributor} that contributes a fixed "example" entry
 * (a single-key map) to the actuator info endpoint.
 */
@Component
public class ExampleInfoContributor implements InfoContributor {

	@Override
	public void contribute(Info.Builder builder) {
		builder.withDetail("example", exampleDetail());
	}

	/** Builds the static detail value published under the "example" key. */
	private static Object exampleDetail() {
		return Collections.singletonMap("someKey", "someValue");
	}

}
apache-2.0
Aethelflaed/connectbot
src/org/connectbot/ColorsActivity.java
8959
/*
 * ConnectBot: simple, powerful, open-source SSH client for Android
 * Copyright 2007 Kenny Root, Jeffrey Sharkey
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.connectbot;

import java.util.Arrays;
import java.util.List;

import org.connectbot.util.Colors;
import org.connectbot.util.HostDatabase;
import org.connectbot.util.UberColorPickerDialog;
import org.connectbot.util.UberColorPickerDialog.OnColorChangedListener;

import android.app.Activity;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.MenuItem.OnMenuItemClickListener;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.GridView;
import android.widget.Spinner;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.AdapterView.OnItemSelectedListener;

/**
 * Activity for editing the terminal color palette: a grid of the scheme's
 * colors (tap a cell to open the color picker) plus two spinners selecting
 * the default foreground/background indices. Changes are persisted to
 * {@link HostDatabase} immediately.
 *
 * @author Kenny Root
 *
 */
public class ColorsActivity extends Activity implements OnItemClickListener, OnColorChangedListener, OnItemSelectedListener {
	// grid of palette cells; each cell is a ColorView
	private GridView mColorGrid;
	// default foreground color selector
	private Spinner mFgSpinner;
	// default background color selector
	private Spinner mBgSpinner;

	// scheme being edited; only the default scheme is used here
	private int mColorScheme;

	// current palette values, index == color number - 1
	private List<Integer> mColorList;
	private HostDatabase hostdb;

	// palette index currently open in the color picker dialog
	private int mCurrentColor = 0;

	// [0] = default fg index, [1] = default bg index
	private int[] mDefaultColors;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);

		setContentView(R.layout.act_colors);

		this.setTitle(String.format("%s: %s",
				getResources().getText(R.string.app_name),
				getResources().getText(R.string.title_colors)));

		mColorScheme = HostDatabase.DEFAULT_COLOR_SCHEME;

		hostdb = new HostDatabase(this);

		// load the stored palette and the stored default fg/bg indices
		mColorList = Arrays.asList(hostdb.getColorsForScheme(mColorScheme));
		mDefaultColors = hostdb.getDefaultColorsForScheme(mColorScheme);

		mColorGrid = (GridView) findViewById(R.id.color_grid);
		mColorGrid.setAdapter(new ColorsAdapter(true));
		mColorGrid.setOnItemClickListener(this);
		mColorGrid.setSelection(0);

		mFgSpinner = (Spinner) findViewById(R.id.fg);
		mFgSpinner.setAdapter(new ColorsAdapter(false));
		mFgSpinner.setSelection(mDefaultColors[0]);
		mFgSpinner.setOnItemSelectedListener(this);

		mBgSpinner = (Spinner) findViewById(R.id.bg);
		mBgSpinner.setAdapter(new ColorsAdapter(false));
		mBgSpinner.setSelection(mDefaultColors[1]);
		mBgSpinner.setOnItemSelectedListener(this);
	}

	@Override
	protected void onDestroy() {
		super.onDestroy();

		// release the database; reopened lazily in onResume
		if (hostdb != null) {
			hostdb.close();
			hostdb = null;
		}
	}

	@Override
	protected void onResume() {
		super.onResume();

		if (hostdb == null)
			hostdb = new HostDatabase(this);
	}

	/**
	 * Adapter backing both the palette grid (square cells) and the fg/bg
	 * spinners (non-square cells); items are the palette colors, numbered
	 * starting at 1.
	 */
	private class ColorsAdapter extends BaseAdapter {
		private boolean mSquareViews;

		public ColorsAdapter(boolean squareViews) {
			mSquareViews = squareViews;
		}

		public View getView(int position, View convertView, ViewGroup parent) {
			ColorView c;

			if (convertView == null) {
				c = new ColorView(ColorsActivity.this, mSquareViews);
			} else {
				c = (ColorView) convertView;
			}

			c.setColor(mColorList.get(position));
			c.setNumber(position + 1);

			return c;
		}

		public int getCount() {
			return mColorList.size();
		}

		public Object getItem(int position) {
			return mColorList.get(position);
		}

		public long getItemId(int position) {
			return position;
		}
	}

	/**
	 * A single palette swatch: a solid background color with its 1-based
	 * number drawn centered on top (white text over a black outline so it
	 * stays legible on any background).
	 */
	private class ColorView extends View {
		// when true, force height == width (grid cells)
		private boolean mSquare;

		private Paint mTextPaint;
		private Paint mShadowPaint;

		// Things we paint
		private int mBackgroundColor;
		private String mText;

		// cached text metrics/centers computed in onMeasure
		private int mAscent;
		private int mWidthCenter;
		private int mHeightCenter;

		public ColorView(Context context, boolean square) {
			super(context);

			mSquare = square;

			mTextPaint = new Paint();
			mTextPaint.setAntiAlias(true);
			mTextPaint.setTextSize(16);
			mTextPaint.setColor(0xFFFFFFFF);
			mTextPaint.setTextAlign(Paint.Align.CENTER);

			// outline pass drawn beneath the white text
			mShadowPaint = new Paint(mTextPaint);
			mShadowPaint.setStyle(Paint.Style.STROKE);
			mShadowPaint.setStrokeCap(Paint.Cap.ROUND);
			mShadowPaint.setStrokeJoin(Paint.Join.ROUND);
			mShadowPaint.setStrokeWidth(4f);
			mShadowPaint.setColor(0xFF000000);

			setPadding(10, 10, 10, 10);
		}

		public void setColor(int color) {
			mBackgroundColor = color;
		}

		public void setNumber(int number) {
			mText = Integer.toString(number);
		}

		@Override
		protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
			int width = measureWidth(widthMeasureSpec);

			int height;
			if (mSquare)
				height = width;
			else
				height = measureHeight(heightMeasureSpec);

			mAscent = (int) mTextPaint.ascent();
			mWidthCenter = width / 2;
			// vertical centering: offset by half the (negative) ascent
			mHeightCenter = height / 2 - mAscent / 2;

			setMeasuredDimension(width, height);
		}

		private int measureWidth(int measureSpec) {
			int result = 0;

			int specMode = MeasureSpec.getMode(measureSpec);
			int specSize = MeasureSpec.getSize(measureSpec);

			if (specMode == MeasureSpec.EXACTLY) {
				// We were told how big to be
				result = specSize;
			} else {
				// Measure the text
				result = (int) mTextPaint.measureText(mText) + getPaddingLeft()
						+ getPaddingRight();
				if (specMode == MeasureSpec.AT_MOST) {
					// Respect AT_MOST value if that was what is called for by
					// measureSpec
					result = Math.min(result, specSize);
				}
			}

			return result;
		}

		private int measureHeight(int measureSpec) {
			int result = 0;

			int specMode = MeasureSpec.getMode(measureSpec);
			int specSize = MeasureSpec.getSize(measureSpec);

			mAscent = (int) mTextPaint.ascent();
			if (specMode == MeasureSpec.EXACTLY) {
				// We were told how big to be
				result = specSize;
			} else {
				// Measure the text (beware: ascent is a negative number)
				result = (int) (-mAscent + mTextPaint.descent())
						+ getPaddingTop() + getPaddingBottom();
				if (specMode == MeasureSpec.AT_MOST) {
					// Respect AT_MOST value if that was what is called for by
					// measureSpec
					result = Math.min(result, specSize);
				}
			}
			return result;
		}

		@Override
		protected void onDraw(Canvas canvas) {
			super.onDraw(canvas);

			canvas.drawColor(mBackgroundColor);

			// outline first, then the white number on top
			canvas.drawText(mText, mWidthCenter, mHeightCenter, mShadowPaint);
			canvas.drawText(mText, mWidthCenter, mHeightCenter, mTextPaint);
		}
	}

	/** Opens the color picker for the given palette index. */
	private void editColor(int colorNumber) {
		mCurrentColor = colorNumber;
		new UberColorPickerDialog(this, this, mColorList.get(colorNumber)).show();
	}

	public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
		editColor(position);
	}

	public void onNothingSelected(AdapterView<?> arg0) { }

	/** Callback from the picker: persist the new value and repaint the grid. */
	public void colorChanged(int value) {
		hostdb.setGlobalColor(mCurrentColor, value);
		mColorList.set(mCurrentColor, value);
		mColorGrid.invalidateViews();
	}

	/** Spinner selection changed: persist new default fg/bg only when it differs. */
	public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
		boolean needUpdate = false;
		if (parent == mFgSpinner) {
			if (position != mDefaultColors[0]) {
				mDefaultColors[0] = position;
				needUpdate = true;
			}
		} else if (parent == mBgSpinner) {
			if (position != mDefaultColors[1]) {
				mDefaultColors[1] = position;
				needUpdate = true;
			}
		}

		if (needUpdate)
			hostdb.setDefaultColorsForScheme(mColorScheme, mDefaultColors[0], mDefaultColors[1]);
	}

	@Override
	public boolean onCreateOptionsMenu(Menu menu) {
		super.onCreateOptionsMenu(menu);

		MenuItem reset = menu.add(R.string.menu_colors_reset);
		reset.setAlphabeticShortcut('r');
		reset.setNumericShortcut('1');
		reset.setIcon(android.R.drawable.ic_menu_revert);
		reset.setOnMenuItemClickListener(new OnMenuItemClickListener() {
			public boolean onMenuItemClick(MenuItem arg0) {
				// Reset each individual color to defaults.
				for (int i = 0; i < Colors.defaults.length; i++) {
					if (mColorList.get(i) != Colors.defaults[i]) {
						hostdb.setGlobalColor(i, Colors.defaults[i]);
						mColorList.set(i, Colors.defaults[i]);
					}
				}
				mColorGrid.invalidateViews();

				// Reset the default FG/BG colors as well.
				mFgSpinner.setSelection(HostDatabase.DEFAULT_FG_COLOR);
				mBgSpinner.setSelection(HostDatabase.DEFAULT_BG_COLOR);
				hostdb.setDefaultColorsForScheme(HostDatabase.DEFAULT_COLOR_SCHEME,
						HostDatabase.DEFAULT_FG_COLOR, HostDatabase.DEFAULT_BG_COLOR);

				return true;
			}
		});

		return true;
	}
}
apache-2.0
GreenLightning/libgdx
extensions/gdx-bullet/jni/swig-src/collision/com/badlogic/gdx/physics/bullet/collision/btPolyhedralContactClipping.java
3698
/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 3.0.2
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package com.badlogic.gdx.physics.bullet.collision;

import com.badlogic.gdx.physics.bullet.BulletBase;
import com.badlogic.gdx.physics.bullet.linearmath.*;
import com.badlogic.gdx.math.Vector3;
import com.badlogic.gdx.math.Quaternion;
import com.badlogic.gdx.math.Matrix3;
import com.badlogic.gdx.math.Matrix4;

/**
 * SWIG-generated Java proxy for the native Bullet {@code btPolyhedralContactClipping}
 * class; all real work happens in native code via {@link CollisionJNI}.
 * Semantics of the static methods are defined by the native Bullet implementation.
 */
public class btPolyhedralContactClipping extends BulletBase {
	// address of the wrapped native object
	private long swigCPtr;
	
	protected btPolyhedralContactClipping(final String className, long cPtr, boolean cMemoryOwn) {
		super(className, cPtr, cMemoryOwn);
		swigCPtr = cPtr;
	}
	
	/** Construct a new btPolyhedralContactClipping, normally you should not need this constructor it's intended for low-level usage. */ 
	public btPolyhedralContactClipping(long cPtr, boolean cMemoryOwn) {
		this("btPolyhedralContactClipping", cPtr, cMemoryOwn);
		construct();
	}
	
	@Override
	protected void reset(long cPtr, boolean cMemoryOwn) {
		// rebind this proxy to a (possibly new) native object
		if (!destroyed)
			destroy();
		super.reset(swigCPtr = cPtr, cMemoryOwn);
	}
	
	public static long getCPtr(btPolyhedralContactClipping obj) {
		return (obj == null) ? 0 : obj.swigCPtr;
	}

	@Override
	protected void finalize() throws Throwable {
		if (!destroyed)
			destroy();
		super.finalize();
	}

	@Override protected synchronized void delete() {
		// free the native object only if this proxy owns it
		if (swigCPtr != 0) {
			if (swigCMemOwn) {
				swigCMemOwn = false;
				CollisionJNI.delete_btPolyhedralContactClipping(swigCPtr);
			}
			swigCPtr = 0;
		}
		super.delete();
	}

	public static void clipHullAgainstHull(Vector3 separatingNormal, btConvexPolyhedron hullA, btConvexPolyhedron hullB, Matrix4 transA, Matrix4 transB, float minDist, float maxDist, btDiscreteCollisionDetectorInterface.Result resultOut) {
		CollisionJNI.btPolyhedralContactClipping_clipHullAgainstHull(separatingNormal, btConvexPolyhedron.getCPtr(hullA), hullA, btConvexPolyhedron.getCPtr(hullB), hullB, transA, transB, minDist, maxDist, btDiscreteCollisionDetectorInterface.Result.getCPtr(resultOut), resultOut);
	}

	public static void clipFaceAgainstHull(Vector3 separatingNormal, btConvexPolyhedron hullA, Matrix4 transA, btVector3Array worldVertsB1, float minDist, float maxDist, btDiscreteCollisionDetectorInterface.Result resultOut) {
		CollisionJNI.btPolyhedralContactClipping_clipFaceAgainstHull(separatingNormal, btConvexPolyhedron.getCPtr(hullA), hullA, transA, btVector3Array.getCPtr(worldVertsB1), worldVertsB1, minDist, maxDist, btDiscreteCollisionDetectorInterface.Result.getCPtr(resultOut), resultOut);
	}

	public static boolean findSeparatingAxis(btConvexPolyhedron hullA, btConvexPolyhedron hullB, Matrix4 transA, Matrix4 transB, Vector3 sep, btDiscreteCollisionDetectorInterface.Result resultOut) {
		return CollisionJNI.btPolyhedralContactClipping_findSeparatingAxis(btConvexPolyhedron.getCPtr(hullA), hullA, btConvexPolyhedron.getCPtr(hullB), hullB, transA, transB, sep, btDiscreteCollisionDetectorInterface.Result.getCPtr(resultOut), resultOut);
	}

	public static void clipFace(btVector3Array pVtxIn, btVector3Array ppVtxOut, Vector3 planeNormalWS, float planeEqWS) {
		CollisionJNI.btPolyhedralContactClipping_clipFace(btVector3Array.getCPtr(pVtxIn), pVtxIn, btVector3Array.getCPtr(ppVtxOut), ppVtxOut, planeNormalWS, planeEqWS);
	}

	public btPolyhedralContactClipping() {
		this(CollisionJNI.new_btPolyhedralContactClipping(), true);
	}

}
apache-2.0
asedunov/intellij-community
plugins/cvs/cvs-plugin/src/com/intellij/cvsSupport2/connections/CvsConnectionSettings.java
3496
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.cvsSupport2.connections; import com.intellij.cvsSupport2.config.*; import com.intellij.cvsSupport2.cvsoperations.cvsMessages.CvsListenerWithProgress; import com.intellij.cvsSupport2.cvsoperations.dateOrRevision.RevisionOrDate; import com.intellij.cvsSupport2.errorHandling.ErrorRegistry; import com.intellij.cvsSupport2.javacvsImpl.io.ReadWriteStatistics; import org.netbeans.lib.cvsclient.CvsRoot; import org.netbeans.lib.cvsclient.connection.IConnection; /** * author: lesya */ public abstract class CvsConnectionSettings extends CvsRootData implements CvsEnvironment, CvsSettings { private final CvsRootConfiguration myCvsRootConfiguration; private boolean myOffline; protected CvsConnectionSettings(CvsRootConfiguration cvsRootConfiguration) { super(cvsRootConfiguration.getCvsRootAsString()); PORT = getDefaultPort(); myCvsRootConfiguration = cvsRootConfiguration; } public abstract int getDefaultPort(); public RevisionOrDate getRevisionOrDate() { return RevisionOrDate.EMPTY; } public String getRepository() { return REPOSITORY; } public CvsRoot getCvsRoot() { return new CvsRoot(USER, REPOSITORY, getCvsRootAsString()); } public boolean isValid() { return true; } public IConnection createConnection(ReadWriteStatistics statistics) { CvsListenerWithProgress cvsCommandStopper = CvsListenerWithProgress.createOnProgress(); IConnection originalConnection = 
createOriginalConnection(cvsCommandStopper, myCvsRootConfiguration); if (originalConnection instanceof SelfTestingConnection) { return new SelfTestingConnectionWrapper(originalConnection, statistics, cvsCommandStopper); } else { return new ConnectionWrapper(originalConnection, statistics, cvsCommandStopper); } } protected abstract IConnection createOriginalConnection(ErrorRegistry errorRegistry, CvsRootConfiguration cvsRootConfiguration); protected ExtConfiguration getExtConfiguration() { return myCvsRootConfiguration.EXT_CONFIGURATION; } protected LocalSettings getLocalConfiguration() { return myCvsRootConfiguration.LOCAL_CONFIGURATION; } protected SshSettings getSshConfiguration() { return myCvsRootConfiguration.SSH_CONFIGURATION; } public ProxySettings getProxySettings(){ return myCvsRootConfiguration.PROXY_SETTINGS; } public void setUseProxy(String proxyHost, String proxyPort) { super.setUseProxy(proxyHost, proxyPort); final ProxySettings settings = myCvsRootConfiguration.PROXY_SETTINGS; settings.PROXY_HOST = proxyHost; try { settings.PROXY_PORT = Integer.parseInt(proxyPort); } catch (NumberFormatException e) { //ignore } settings.USE_PROXY = true; } public boolean isOffline() { return myOffline; } public void setOffline(final boolean offline) { myOffline = offline; } }
apache-2.0
asedunov/intellij-community
platform/platform-api/src/com/intellij/codeInsight/hint/QuestionAction.java
696
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInsight.hint; public interface QuestionAction{ boolean execute(); }
apache-2.0
ikedam/jenkins
test/src/test/java/hudson/slaves/NodeProvisionerTest.java
8605
/* * The MIT License * * Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ package hudson.slaves; import hudson.BulkChange; import hudson.Launcher; import hudson.model.*; import hudson.tasks.Builder; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static org.junit.Assert.*; import org.junit.Rule; import org.junit.Test; import org.jvnet.hudson.test.JenkinsRule; import org.jvnet.hudson.test.RandomlyFails; import org.jvnet.hudson.test.SleepBuilder; /** * @author Kohsuke Kawaguchi */ public class NodeProvisionerTest { @Rule public JenkinsRule r = new NodeProvisionerRule(/* run x1000 the regular speed to speed up the test */10, 100, 10); /** * Latch synchronization primitive that waits for N thread to pass the checkpoint. * <p> * This is used to make sure we get a set of builds that run long enough. */ static class Latch { /** Initial value */ public final CountDownLatch counter; private final int init; Latch(int n) { this.init = n; this.counter = new CountDownLatch(n); } void block() throws InterruptedException { this.counter.countDown(); this.counter.await(60, TimeUnit.SECONDS); } /** * Creates a builder that blocks until the latch opens. */ public Builder createBuilder() { return new Builder() { public boolean perform(AbstractBuild<?, ?> build, Launcher launcher, BuildListener listener) throws InterruptedException, IOException { block(); return true; } }; } } /** * Scenario: schedule a build and see if one slave is provisioned. */ @RandomlyFails("fragile") @Test public void autoProvision() throws Exception { BulkChange bc = new BulkChange(r.jenkins); try { DummyCloudImpl cloud = initHudson(10); FreeStyleProject p = createJob(new SleepBuilder(10)); Future<FreeStyleBuild> f = p.scheduleBuild2(0); f.get(30, TimeUnit.SECONDS); // if it's taking too long, abort. 
// since there's only one job, we expect there to be just one slave assertEquals(1,cloud.numProvisioned); } finally { bc.abort(); } } /** * Scenario: we got a lot of jobs all of the sudden, and we need to fire up a few nodes. */ @RandomlyFails("fragile") @Test public void loadSpike() throws Exception { BulkChange bc = new BulkChange(r.jenkins); try { DummyCloudImpl cloud = initHudson(0); verifySuccessfulCompletion(buildAll(create5SlowJobs(new Latch(5)))); // the time it takes to complete a job is eternally long compared to the time it takes to launch // a new slave, so in this scenario we end up allocating 5 slaves for 5 jobs. assertEquals(5,cloud.numProvisioned); } finally { bc.abort(); } } /** * Scenario: make sure we take advantage of statically configured slaves. */ @RandomlyFails("fragile") @Test public void baselineSlaveUsage() throws Exception { BulkChange bc = new BulkChange(r.jenkins); try { DummyCloudImpl cloud = initHudson(0); // add slaves statically upfront r.createSlave().toComputer().connect(false).get(); r.createSlave().toComputer().connect(false).get(); verifySuccessfulCompletion(buildAll(create5SlowJobs(new Latch(5)))); // we should have used two static slaves, thus only 3 slaves should have been provisioned assertEquals(3,cloud.numProvisioned); } finally { bc.abort(); } } /** * Scenario: loads on one label shouldn't translate to load on another label. 
*/ @RandomlyFails("fragile") @Test public void labels() throws Exception { BulkChange bc = new BulkChange(r.jenkins); try { DummyCloudImpl cloud = initHudson(0); Label blue = r.jenkins.getLabel("blue"); Label red = r.jenkins.getLabel("red"); cloud.label = red; // red jobs List<FreeStyleProject> redJobs = create5SlowJobs(new Latch(5)); for (FreeStyleProject p : redJobs) p.setAssignedLabel(red); // blue jobs List<FreeStyleProject> blueJobs = create5SlowJobs(new Latch(5)); for (FreeStyleProject p : blueJobs) p.setAssignedLabel(blue); // build all List<Future<FreeStyleBuild>> blueBuilds = buildAll(blueJobs); verifySuccessfulCompletion(buildAll(redJobs)); // cloud should only give us 5 nodes for 5 red jobs assertEquals(5,cloud.numProvisioned); // and all blue jobs should be still stuck in the queue for (Future<FreeStyleBuild> bb : blueBuilds) assertFalse(bb.isDone()); } finally { bc.abort(); } } private FreeStyleProject createJob(Builder builder) throws IOException { FreeStyleProject p = r.createFreeStyleProject(); p.setAssignedLabel(null); // let it roam free, or else it ties itself to the master since we have no slaves p.getBuildersList().add(builder); return p; } private DummyCloudImpl initHudson(int delay) throws IOException { // start a dummy service DummyCloudImpl cloud = new DummyCloudImpl(r, delay); r.jenkins.clouds.add(cloud); // no build on the master, to make sure we get everything from the cloud r.jenkins.setNumExecutors(0); r.jenkins.setNodes(Collections.<Node>emptyList()); return cloud; } private List<FreeStyleProject> create5SlowJobs(Latch l) throws IOException { List<FreeStyleProject> jobs = new ArrayList<FreeStyleProject>(); for( int i=0; i<l.init; i++) //set a large delay, to simulate the situation where we need to provision more slaves // to keep up with the load jobs.add(createJob(l.createBuilder())); return jobs; } /** * Builds all the given projects at once. 
*/ private List<Future<FreeStyleBuild>> buildAll(List<FreeStyleProject> jobs) { System.out.println("Scheduling builds for "+jobs.size()+" jobs"); List<Future<FreeStyleBuild>> builds = new ArrayList<Future<FreeStyleBuild>>(); for (FreeStyleProject job : jobs) builds.add(job.scheduleBuild2(0)); return builds; } private void verifySuccessfulCompletion(List<Future<FreeStyleBuild>> builds) throws Exception { System.out.println("Waiting for a completion"); for (Future<FreeStyleBuild> f : builds) { try { r.assertBuildStatus(Result.SUCCESS, f.get(90, TimeUnit.SECONDS)); } catch (TimeoutException e) { // time out so that the automated test won't hang forever, even when we have bugs System.out.println("Build didn't complete in time"); throw e; } } } }
mit
gfyoung/elasticsearch
server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
4213
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; /** Request the mappings of specific fields */ public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable { protected boolean local = false; private String[] fields = Strings.EMPTY_ARRAY; private boolean includeDefaults = false; private String[] indices = Strings.EMPTY_ARRAY; private String[] types = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); public GetFieldMappingsRequest() { } /** * Indicate whether the receiving node should operate based on local index information or forward requests, * where needed, to other nodes. If running locally, request will not raise errors if running locally &amp; missing indices. 
*/ public GetFieldMappingsRequest local(boolean local) { this.local = local; return this; } public boolean local() { return local; } @Override public GetFieldMappingsRequest indices(String... indices) { this.indices = indices; return this; } public GetFieldMappingsRequest types(String... types) { this.types = types; return this; } public GetFieldMappingsRequest indicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; return this; } @Override public String[] indices() { return indices; } public String[] types() { return types; } @Override public IndicesOptions indicesOptions() { return indicesOptions; } /** @param fields a list of fields to retrieve the mapping for */ public GetFieldMappingsRequest fields(String... fields) { this.fields = fields; return this; } public String[] fields() { return fields; } public boolean includeDefaults() { return includeDefaults; } /** Indicates whether default mapping settings should be returned */ public GetFieldMappingsRequest includeDefaults(boolean includeDefaults) { this.includeDefaults = includeDefaults; return this; } @Override public ActionRequestValidationException validate() { return null; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); out.writeStringArray(types); indicesOptions.writeIndicesOptions(out); out.writeBoolean(local); out.writeStringArray(fields); out.writeBoolean(includeDefaults); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); indices = in.readStringArray(); types = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); local = in.readBoolean(); fields = in.readStringArray(); includeDefaults = in.readBoolean(); } }
apache-2.0
rokn/Count_Words_2015
testing/openjdk2/jdk/src/share/classes/sun/tools/asm/ArrayData.java
1633
/* * Copyright (c) 1995, 2003, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package sun.tools.asm; import sun.tools.java.*; /** * WARNING: The contents of this source file are not part of any * supported API. Code that depends on them does so at its own risk: * they are subject to change or removal without notice. */ public final class ArrayData { Type type; int nargs; public ArrayData(Type type, int nargs) { this.type = type; this.nargs = nargs; } }
mit
asedunov/intellij-community
java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/createLocalVarFromInstanceof/before5.java
133
// "Insert '(Runnable)this' declaration" "true" class C { void f() { if (this instanceof Runnable) { <caret> } } }
apache-2.0
asedunov/intellij-community
java/java-tests/testData/codeInsight/daemonCodeAnalyzer/quickFix/createLocalVarFromInstanceof/before3.java
157
// "Insert '(Runnable)this' declaration" "true" class C { void f() { while (!(<caret>this instanceof Runnable)) { //return; } } }
apache-2.0
rokn/Count_Words_2015
testing/openjdk2/langtools/test/tools/javac/generics/6207386/Test.java
1381
/* * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * @test * @bug 6207386 * @summary Undecidable type system leads to crash * @author Peter von der Ah\u00e9 * @compile Test.java */ public class Test<T> { <T extends Test<? super Number>> T m1(T t) { return m2(t); } <T extends Test<? super Number>> T m2(T t) { return null; } }
mit
queirozfcom/elasticsearch
plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java
9815
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.*; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.*; /** * */ public class AzureStorageServiceImpl extends AbstractLifecycleComponent<AzureStorageServiceImpl> implements AzureStorageService { private final String account; private final String key; private final String blob; private CloudBlobClient client; @Inject public AzureStorageServiceImpl(Settings settings) { super(settings); // We try to load storage API 
settings from `cloud.azure.` account = settings.get(ACCOUNT); key = settings.get(KEY); blob = "http://" + account + ".blob.core.windows.net/"; try { if (account != null) { logger.trace("creating new Azure storage client using account [{}], key [{}], blob [{}]", account, key, blob); String storageConnectionString = "DefaultEndpointsProtocol=http;" + "AccountName="+ account +";" + "AccountKey=" + key; // Retrieve storage account from connection-string. CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); // Create the blob client. client = storageAccount.createCloudBlobClient(); } } catch (Exception e) { // Can not start Azure Storage Client logger.error("can not start azure storage client: {}", e.getMessage()); } } @Override public boolean doesContainerExist(String container) { try { CloudBlobContainer blob_container = client.getContainerReference(container); return blob_container.exists(); } catch (Exception e) { logger.error("can not access container [{}]", container); } return false; } @Override public void removeContainer(String container) throws URISyntaxException, StorageException { CloudBlobContainer blob_container = client.getContainerReference(container); // TODO Should we set some timeout and retry options? 
/* BlobRequestOptions options = new BlobRequestOptions(); options.setTimeoutIntervalInMs(1000); options.setRetryPolicyFactory(new RetryNoRetry()); blob_container.deleteIfExists(options, null); */ logger.trace("removing container [{}]", container); blob_container.deleteIfExists(); } @Override public void createContainer(String container) throws URISyntaxException, StorageException { try { CloudBlobContainer blob_container = client.getContainerReference(container); logger.trace("creating container [{}]", container); blob_container.createIfNotExists(); } catch (IllegalArgumentException e) { logger.trace("fails creating container [{}]", container, e.getMessage()); throw new RepositoryException(container, e.getMessage()); } } @Override public void deleteFiles(String container, String path) throws URISyntaxException, StorageException { logger.trace("delete files container [{}], path [{}]", container, path); // Container name must be lower case. CloudBlobContainer blob_container = client.getContainerReference(container); if (blob_container.exists()) { for (ListBlobItem blobItem : blob_container.listBlobs(path)) { logger.trace("removing blob [{}]", blobItem.getUri()); deleteBlob(container, blobItem.getUri().toString()); } } } @Override public boolean blobExists(String container, String blob) throws URISyntaxException, StorageException { // Container name must be lower case. CloudBlobContainer blob_container = client.getContainerReference(container); if (blob_container.exists()) { CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); return azureBlob.exists(); } return false; } @Override public void deleteBlob(String container, String blob) throws URISyntaxException, StorageException { logger.trace("delete blob for container [{}], blob [{}]", container, blob); // Container name must be lower case. CloudBlobContainer blob_container = client.getContainerReference(container); if (blob_container.exists()) { logger.trace("container [{}]: blob [{}] found. 
removing.", container, blob); CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); azureBlob.delete(); } } @Override public InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException { logger.trace("reading container [{}], blob [{}]", container, blob); return client.getContainerReference(container).getBlockBlobReference(blob).openInputStream(); } @Override public OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException { logger.trace("writing container [{}], blob [{}]", container, blob); return client.getContainerReference(container).getBlockBlobReference(blob).openOutputStream(); } @Override public Map<String, BlobMetaData> listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException { logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder(); CloudBlobContainer blobContainer = client.getContainerReference(container); if (blobContainer.exists()) { for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? 
"" : prefix))) { URI uri = blobItem.getUri(); logger.trace("blob url [{}]", uri); // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / String blobPath = uri.getPath().substring(1 + container.length() + 1); CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobPath); // fetch the blob attributes from Azure (getBlockBlobReference does not do this) // this is needed to retrieve the blob length (among other metadata) from Azure Storage blob.downloadAttributes(); BlobProperties properties = blob.getProperties(); String name = blobPath.substring(keyPath.length()); logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } return blobsBuilder.immutableMap(); } @Override public void moveBlob(String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, targetBlob); CloudBlobContainer blob_container = client.getContainerReference(container); CloudBlockBlob blobSource = blob_container.getBlockBlobReference(sourceBlob); if (blobSource.exists()) { CloudBlockBlob blobTarget = blob_container.getBlockBlobReference(targetBlob); blobTarget.startCopyFromBlob(blobSource); blobSource.delete(); logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob); } } @Override protected void doStart() throws ElasticsearchException { logger.debug("starting azure storage client instance"); } @Override protected void doStop() throws ElasticsearchException { logger.debug("stopping azure storage client instance"); } @Override protected void doClose() throws ElasticsearchException { } }
apache-2.0
jerome-jacob/selenium
java/server/src/org/openqa/grid/common/exception/RemoteNotReachableException.java
1059
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.grid.common.exception; public class RemoteNotReachableException extends RemoteException { private static final long serialVersionUID = 3540250584036529453L; public RemoteNotReachableException(String msg) { super(msg); } }
apache-2.0
cnfire/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
6187
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.SCMAdminProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse; public class SCMAdmin extends Configured implements Tool { private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); public SCMAdmin() { super(); } public SCMAdmin(Configuration conf) { super(conf); } private static void printHelp(String cmd) { String summary = "scmadmin is the command to execute shared cache manager" + "administrative 
commands.\n" + "The full syntax is: \n\n" + "yarn scmadmin" + " [-runCleanerTask]" + " [-help [cmd]]\n"; String runCleanerTask = "-runCleanerTask: Run cleaner task right away.\n"; String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + "\t\tis specified.\n"; if ("runCleanerTask".equals(cmd)) { System.out.println(runCleanerTask); } else if ("help".equals(cmd)) { System.out.println(help); } else { System.out.println(summary); System.out.println(runCleanerTask); System.out.println(help); System.out.println(); ToolRunner.printGenericCommandUsage(System.out); } } /** * Displays format of commands. * @param cmd The command that is being executed. */ private static void printUsage(String cmd) { if ("-runCleanerTask".equals(cmd)) { System.err.println("Usage: yarn scmadmin" + " [-runCleanerTask]"); } else { System.err.println("Usage: yarn scmadmin"); System.err.println(" [-runCleanerTask]"); System.err.println(" [-help [cmd]]"); System.err.println(); ToolRunner.printGenericCommandUsage(System.err); } } protected SCMAdminProtocol createSCMAdminProtocol() throws IOException { // Get the current configuration final YarnConfiguration conf = new YarnConfiguration(getConf()); // Create the admin client final InetSocketAddress addr = conf.getSocketAddr( YarnConfiguration.SCM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_SCM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_SCM_ADMIN_PORT); final YarnRPC rpc = YarnRPC.create(conf); SCMAdminProtocol scmAdminProtocol = (SCMAdminProtocol) rpc.getProxy(SCMAdminProtocol.class, addr, conf); return scmAdminProtocol; } private int runCleanerTask() throws YarnException, IOException { // run cleaner task right away SCMAdminProtocol scmAdminProtocol = createSCMAdminProtocol(); RunSharedCacheCleanerTaskRequest request = recordFactory.newRecordInstance(RunSharedCacheCleanerTaskRequest.class); RunSharedCacheCleanerTaskResponse response = scmAdminProtocol.runCleanerTask(request); if (response.getAccepted()) { 
System.out.println("request accepted by shared cache manager"); return 0; } else { System.out.println("request rejected by shared cache manager"); return 1; } } @Override public int run(String[] args) throws Exception { if (args.length < 1) { printUsage(""); return -1; } int i = 0; String cmd = args[i++]; try { if ("-runCleanerTask".equals(cmd)) { if (args.length != 1) { printUsage(cmd); return -1; } else { return runCleanerTask(); } } else if ("-help".equals(cmd)) { if (i < args.length) { printUsage(args[i]); } else { printHelp(""); } return 0; } else { System.err.println(cmd.substring(1) + ": Unknown command"); printUsage(""); return -1; } } catch (IllegalArgumentException arge) { System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd); } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error message, ignore the stack trace. try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (Exception e) { System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } return -1; } public static void main(String[] args) throws Exception { int result = ToolRunner.run(new SCMAdmin(), args); System.exit(result); } }
apache-2.0
tseen/Federated-HDFS
tseenliu/FedHDFS-hadoop-src/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
3477
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.cli; import static org.junit.Assert.assertTrue; import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CommandExecutor.Result; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.authorize.PolicyProvider; import org.junit.After; import org.junit.Before; import org.junit.Test; public class TestHDFSCLI extends CLITestHelperDFS { protected MiniDFSCluster dfsCluster = null; protected FileSystem fs = null; protected String namenode = null; @Before @Override public void setUp() throws Exception { super.setUp(); conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class); // Many of the tests expect a replication value of 1 in the output conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); // Build racks and hosts configuration to test dfsAdmin -printTopology String [] racks = {"/rack1", "/rack1", "/rack2", "/rack2", "/rack2", "/rack3", "/rack4", "/rack4" }; String [] hosts = {"host1", "host2", "host3", 
"host4", "host5", "host6", "host7", "host8" }; dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8) .racks(racks) .hosts(hosts) .build(); dfsCluster.waitClusterUp(); namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///"); username = System.getProperty("user.name"); fs = dfsCluster.getFileSystem(); assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem); } @Override protected String getTestFile() { return "testHDFSConf.xml"; } @After @Override public void tearDown() throws Exception { if (fs != null) { fs.close(); } if (dfsCluster != null) { dfsCluster.shutdown(); } Thread.sleep(2000); super.tearDown(); } @Override protected String expandCommand(final String cmd) { String expCmd = cmd; expCmd = expCmd.replaceAll("NAMENODE", namenode); expCmd = super.expandCommand(expCmd); return expCmd; } @Override protected Result execute(CLICommand cmd) throws Exception { return cmd.getExecutor(namenode).executeCommand(cmd.getCmd()); } @Test @Override public void testAll () { super.testAll(); } }
apache-2.0
punkhorn/camel-upstream
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/dto/analytics/reports/ReportTypeColumn.java
1823
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.salesforce.api.dto.analytics.reports; import org.apache.camel.component.salesforce.api.dto.AbstractDTOBase; /** * Report categories columns. */ public class ReportTypeColumn extends AbstractDTOBase { private String label; private FilterValue[] filterValues; private String dataType; private Boolean filterable; public String getLabel() { return label; } public void setLabel(String label) { this.label = label; } public FilterValue[] getFilterValues() { return filterValues; } public void setFilterValues(FilterValue[] filterValues) { this.filterValues = filterValues; } public String getDataType() { return dataType; } public void setDataType(String dataType) { this.dataType = dataType; } public Boolean getFilterable() { return filterable; } public void setFilterable(Boolean filterable) { this.filterable = filterable; } }
apache-2.0
lindzh/jenkins
core/src/main/java/hudson/slaves/NodePropertyDescriptor.java
2411
/* * The MIT License * * Copyright (c) 2004-2009, Sun Microsystems, Inc., Tom Huybrechts * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package hudson.slaves; import hudson.Extension; import hudson.model.Node; import hudson.tools.PropertyDescriptor; import jenkins.model.Jenkins; /** * Descriptor for {@link NodeProperty}. * * <p> * Put {@link Extension} on your descriptor implementation to have it auto-registered. * * @since 1.286 * @see NodeProperty */ public abstract class NodePropertyDescriptor extends PropertyDescriptor<NodeProperty<?>,Node> { protected NodePropertyDescriptor(Class<? extends NodeProperty<?>> clazz) { super(clazz); } protected NodePropertyDescriptor() { } /** * Is this node property one where it makes sense to permit it as a global node property. * * @return {@code true} if and only if the node property can be listed as a global node property. 
* @since 1.520 */ public boolean isApplicableAsGlobal() { // preserve legacy behaviour, even if brain-dead stupid, where applying to Jenkins was the discriminator // note that it would be a mistake to assume Jenkins.getInstance().getClass() == Jenkins.class // the groovy code tested against app.class, so we replicate that exact logic. return isApplicable(Jenkins.getInstance().getClass()); } }
mit
tseen/Federated-HDFS
tseenliu/FedHDFS-hadoop-src/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
4290
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.viewfs;

import java.io.IOException;
import java.net.URISyntaxException;

import javax.security.auth.login.LoginException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

/**
 * Runs the {@link ViewFileSystemBaseTest} suite against a federated
 * mini HDFS cluster with two namenodes, adding one extra mount point
 * ("/mountOnNn2") that targets the second namespace.
 */
public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {

  private static MiniDFSCluster cluster;
  private static Path defaultWorkingDirectory;
  private static Path defaultWorkingDirectory2;
  private static final Configuration CONF = new Configuration();
  private static FileSystem fHdfs;
  private static FileSystem fHdfs2;
  private FileSystem fsTarget2;
  Path targetTestRoot2;

  @Override
  protected FileSystemTestHelper createFileSystemHelper() {
    return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
  }

  /**
   * Starts a 2-namenode federated mini cluster, points both file systems'
   * default FS at viewfs, and creates each user's home directory in both
   * namespaces.
   */
  @BeforeClass
  public static void clusterSetupAtBegining() throws IOException,
      LoginException, URISyntaxException {
    SupportsBlocks = true;
    // Force delegation tokens on even without security, so the
    // getExpectedDelegationTokenCount() checks below are meaningful.
    CONF.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster = new MiniDFSCluster.Builder(CONF).nnTopology(
        MiniDFSNNTopology.simpleFederatedTopology(2))
        .numDataNodes(2)
        .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
        FsConstants.VIEWFS_URI.toString());
    fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
        FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory = fHdfs.makeQualified(new Path("/user/"
        + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 = fHdfs2.makeQualified(new Path("/user/"
        + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }

  @AfterClass
  public static void ClusterShutdownAtEnd() throws Exception {
    // Guard against an NPE when cluster startup itself failed: without the
    // null check, a failure in clusterSetupAtBegining would be masked by a
    // NullPointerException thrown from this @AfterClass method.
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Override
  @Before
  public void setUp() throws Exception {
    // create the test root on local_fs
    fsTarget = fHdfs;
    fsTarget2 = fHdfs2;
    targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2);
    super.setUp();
  }

  @Override
  @After
  public void tearDown() throws Exception {
    super.tearDown();
  }

  @Override
  void setupMountPoints() {
    super.setupMountPoints();
    // Extra mount point that resolves into the second namenode's namespace.
    ConfigUtil.addLink(conf, "/mountOnNn2", new Path(targetTestRoot2,
        "mountOnNn2").toUri());
  }

  // Overridden test helper methods - changed values based on hdfs and the
  // additional mount.
  @Override
  int getExpectedDirPaths() {
    return 8;
  }

  @Override
  int getExpectedMountPoints() {
    return 9;
  }

  @Override
  int getExpectedDelegationTokenCount() {
    return 2; // Mount points to 2 unique hdfs
  }

  @Override
  int getExpectedDelegationTokenCountWithCredentials() {
    return 2;
  }
}
apache-2.0
hggliu/iosched
third_party/glide/library/src/main/java/com/bumptech/glide/load/resource/bitmap/FitCenter.java
1310
package com.bumptech.glide.load.resource.bitmap; import android.graphics.Bitmap; import com.bumptech.glide.Resource; import com.bumptech.glide.load.engine.bitmap_recycle.BitmapPool; import com.bumptech.glide.load.Transformation; /** * Scale the image uniformly (maintaining the image's aspect ratio) so that one of the dimensions of the image * will be equal to the given dimension and the other will be less than the given dimension */ public class FitCenter implements Transformation<Bitmap> { private BitmapPool pool; public FitCenter(BitmapPool pool) { this.pool = pool; } @Override public Resource<Bitmap> transform(Resource<Bitmap> resource, int outWidth, int outHeight) { if (outWidth <= 0 || outHeight <= 0) { throw new IllegalArgumentException("Cannot fit center image to within width=" + outWidth + " or height=" + outHeight); } Bitmap transformed = TransformationUtils.fitCenter(resource.get(), pool, outWidth, outHeight); if (transformed == resource.get()) { return resource; } else { return new BitmapResource(transformed, pool); } } @Override public String getId() { return "FitCenter.com.bumptech.glide.load.Transformation"; } }
apache-2.0
tseen/Federated-HDFS
tseenliu/FedHDFS-hadoop-src/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java
3159
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.hs.webapp;

import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_OWNER;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_LOG_TYPE;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.NM_NODENAME;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;

/**
 * Web application for the MapReduce JobHistory server.
 *
 * <p>{@link #setup()} wires the REST services and Guice bindings, then
 * registers the URL route table: every route is dispatched to
 * {@link HsController}, with the second string argument (when present)
 * naming the controller action.  Path parameter names (JOB_ID, TASK_ID,
 * etc.) come from the implemented {@link AMParams} interface and from the
 * statically imported YarnWebParams constants.
 */
public class HsWebApp extends WebApp implements AMParams {

  // History context backing both AppContext and HistoryContext bindings.
  private HistoryContext history;

  /** @param history job history context served by this web app */
  public HsWebApp(HistoryContext history) {
    this.history = history;
  }

  @Override
  public void setup() {
    // REST/JSON layer.
    bind(HsWebServices.class);
    bind(JAXBContextResolver.class);
    bind(GenericExceptionHandler.class);
    // The same HistoryContext instance satisfies both interfaces so
    // injected consumers share one view of history state.
    bind(AppContext.class).toInstance(history);
    bind(HistoryContext.class).toInstance(history);
    // Route table: pajoin(...) joins the path with :param placeholders.
    route("/", HsController.class);
    route("/app", HsController.class);
    route(pajoin("/job", JOB_ID), HsController.class, "job");
    route(pajoin("/conf", JOB_ID), HsController.class, "conf");
    route(pajoin("/jobcounters", JOB_ID), HsController.class, "jobCounters");
    route(pajoin("/singlejobcounter",JOB_ID, COUNTER_GROUP, COUNTER_NAME),
        HsController.class, "singleJobCounter");
    route(pajoin("/tasks", JOB_ID, TASK_TYPE), HsController.class, "tasks");
    route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE),
        HsController.class, "attempts");
    route(pajoin("/task", TASK_ID), HsController.class, "task");
    route(pajoin("/taskcounters", TASK_ID), HsController.class,
        "taskCounters");
    route(pajoin("/singletaskcounter",TASK_ID, COUNTER_GROUP, COUNTER_NAME),
        HsController.class, "singleTaskCounter");
    route("/about", HsController.class, "about");
    // Aggregated logs (served from the history server) ...
    route(pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING,
        APP_OWNER, CONTAINER_LOG_TYPE), HsController.class, "logs");
    // ... vs. logs redirected from the node manager.
    route(pajoin("/nmlogs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING,
        APP_OWNER, CONTAINER_LOG_TYPE), HsController.class, "nmlogs");
  }
}
apache-2.0
SanDisk-Open-Source/SSD_Dashboard
uefi/gcc/gcc-4.6.3/libjava/classpath/gnu/javax/security/auth/Password.java
9682
/* Password.java -- opaque wrapper around a password.
   Copyright (C) 2004, 2006 Free Software Foundation, Inc.

This file is a part of GNU Classpath.

GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.

GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Classpath; if not, write to the
   Free Software Foundation, Inc.,
   51 Franklin St, Fifth Floor,
   Boston, MA 02110-1301
   USA

Linking this library statically or dynamically with other modules is
making a combined work based on this library.  Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.

As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module.  An independent module is a module which is not derived from
or based on this library.  If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so.  If you do not wish to do so, delete this
exception statement from your version.  */

package gnu.javax.security.auth;

import gnu.java.security.util.ExpirableObject;

/**
 * Immutable, though destroyable, password class.
 *
 * <p>Extends {@link ExpirableObject}, implementing {@link #doDestroy()}
 * in which the encapsulated {@code char[]} and {@code byte[]} password
 * fields are cleared (elements set to zero) in order to thwart memory
 * heap snooping.
 */
public final class Password extends ExpirableObject
{

  // Constants and variables
  // -------------------------------------------------------------------------

  /**
   * Password stored in {@code char[]} format.
   */
  private final char[] password;

  /**
   * Password stored in {@code byte[]} format.
   */
  private final byte[] bPassword;

  /**
   * Indicates whether this Password object's {@link #doDestroy()} method has
   * been called.  See also, {@link ExpirableObject#destroy()}.
   */
  private boolean mIsDestroyed = false;

  // Constructor(s)
  // -------------------------------------------------------------------------

  /**
   * Create a new expirable Password object that will expire after the
   * default timeout {@link ExpirableObject#DEFAULT_TIMEOUT}.
   *
   * @param password The character array password to associate with this
   *    Password object.
   */
  public Password (char[] password)
  {
    this (password, 0, password.length, DEFAULT_TIMEOUT);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * timeout denoted by constructor parameter, <i>delay</i>.
   *
   * @param password The character array password to associate with this
   *    Password object.
   * @param delay The number of milliseconds before this Password object
   *    will be automatically destroyed.
   */
  public Password (char[] password, long delay)
  {
    this (password, 0, password.length, delay);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * default timeout {@link ExpirableObject#DEFAULT_TIMEOUT}.
   *
   * @param password The character array password to associate with this
   *    Password object.
   * @param offset The <i>password</i> character array parameter element
   *    marking the beginning of the contained password string.
   * @param length The number of characters, beginning at <i>offset</i>,
   *    to be copied into this object's password field.
   */
  public Password (char[] password, int offset, int length)
  {
    this (password, offset, length, DEFAULT_TIMEOUT);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * timeout denoted by constructor parameter, <i>delay</i>.
   *
   * @param password The character array password to associate with this
   *    Password object.
   * @param offset The <i>password</i> character array parameter element
   *    marking the beginning of the contained password string.
   * @param length The number of characters, beginning at <i>offset</i>,
   *    to be copied into this object's password field.
   * @param delay The number of milliseconds before this Password object
   *    will be automatically destroyed.
   */
  public Password (char[] password, int offset, int length, long delay)
  {
    super (delay);

    // NOTE(review): for extreme values of offset and length the sum
    // "offset + length" can overflow int and slip past this check --
    // TODO confirm whether callers can ever supply such values.
    if (offset < 0 || length < 0 || offset + length > password.length)
      throw new ArrayIndexOutOfBoundsException ("off=" + offset + " length=" +
                                                length + " array.length=" +
                                                password.length);

    int i, j;
    this.password = new char[length];
    bPassword = new byte[length];

    for(i = 0, j = offset; i < length; i++, j++)
      {
        this.password[i] = password[j];
        // XXX this should use character encodings, other than ASCII.
        // (the 0x7F mask truncates every char to its low 7 bits)
        bPassword[i] = (byte) (password[j] & 0x7F);
      }
  }

  /**
   * Create a new expirable Password object that will expire after the
   * default timeout {@link ExpirableObject#DEFAULT_TIMEOUT}.
   *
   * @param password The byte array password to associate with this
   *    Password object.
   */
  public Password (byte[] password)
  {
    this (password, 0, password.length, DEFAULT_TIMEOUT);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * timeout denoted by constructor parameter, <i>delay</i>.
   *
   * @param password The byte array password to associate with this
   *    Password object.
   * @param delay The number of milliseconds before this Password object
   *    will be automatically destroyed.
   */
  public Password (byte[] password, long delay)
  {
    this (password, 0, password.length, delay);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * default timeout {@link ExpirableObject#DEFAULT_TIMEOUT}.
   *
   * @param password The byte array password to associate with this
   *    Password object.
   * @param offset The <i>password</i> byte array parameter element
   *    marking the beginning of the contained password string.
   * @param length The number of bytes, beginning at <i>offset</i>,
   *    to be copied into this object's password field.
   */
  public Password (byte[] password, int offset, int length)
  {
    this (password, offset, length, DEFAULT_TIMEOUT);
  }

  /**
   * Create a new expirable Password object that will expire after the
   * timeout denoted by constructor parameter, <i>delay</i>.
   *
   * @param password The byte array password to associate with this
   *    Password object.
   * @param offset The <i>password</i> byte array parameter element
   *    marking the beginning of the contained password string.
   * @param length The number of bytes, beginning at <i>offset</i>,
   *    to be copied into this object's bPassword field.
   * @param delay The number of milliseconds before this Password object
   *    will be automatically destroyed.
   */
  public Password (byte[] password, int offset, int length, long delay)
  {
    super (delay);

    // NOTE(review): same potential "offset + length" int overflow as the
    // char[] constructor above -- TODO confirm.
    if (offset < 0 || length < 0 || offset + length > password.length)
      throw new ArrayIndexOutOfBoundsException ("off=" + offset + " length=" +
                                                length + " array.length=" +
                                                password.length);

    int i, j;
    this.password = new char[length];
    bPassword = new byte[length];

    for (i = 0, j = offset; i < length; i++, j++)
      {
        // Widening byte-to-char cast; no character decoding is performed.
        this.password[i] = (char) password[j];
        bPassword[i] = password[j];
      }
  }

  // Instance methods
  // -------------------------------------------------------------------------

  /**
   * Returns a reference to the {@code char[]} password storage field.
   * The caller receives the internal array itself, not a copy, so that
   * clearing it via {@link #doDestroy()} is effective.
   *
   * @throws IllegalStateException if this object has already been destroyed.
   */
  public synchronized char[] getPassword()
  {
    if (mIsDestroyed)
      throw new IllegalStateException ("Attempted destroyed password access.");

    return password;
  }

  /**
   * Returns a reference to the {@code byte[]} password storage field
   * (7-bit truncated when constructed from a char[] -- see constructor).
   *
   * @throws IllegalStateException if this object has already been destroyed.
   */
  public synchronized byte[] getBytes()
  {
    if (mIsDestroyed)
      throw new IllegalStateException ("Attempted destroyed password access.");

    return bPassword;
  }

  /**
   * Sets password field char[], and byte[] array elements to zero.
   * This method implements base class {@link ExpirableObject} abstract
   * method, {@link ExpirableObject#doDestroy()}.  See also,
   * {@link ExpirableObject#destroy()}.  Idempotent: subsequent calls
   * are no-ops.
   */
  protected synchronized void doDestroy()
  {
    if (isDestroyed())
      return;
    else
      {
        for (int i = 0; i < password.length; i++)
          password[i] = 0;
        for (int i = 0; i < bPassword.length; i++)
          bPassword[i] = 0;
        mIsDestroyed = true;
      }
  }

  /**
   * Returns true, or false relative to whether, or not this object's
   * {@link #doDestroy()} method has been called.  See also,
   * {@link ExpirableObject#destroy()}.
   */
  public synchronized boolean isDestroyed()
  {
    return (mIsDestroyed);
  }
}
gpl-2.0
yuweijun/learning-programming
dubbo/dubbo-registry-simple/src/main/java/com/alibaba/dubbo/registry/simple/SimpleRegistryService.java
8098
/*
 * Copyright 1999-2011 Alibaba Group.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.dubbo.registry.simple;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import com.alibaba.dubbo.common.Constants;
import com.alibaba.dubbo.common.URL;
import com.alibaba.dubbo.common.logger.Logger;
import com.alibaba.dubbo.common.logger.LoggerFactory;
import com.alibaba.dubbo.common.utils.ConcurrentHashSet;
import com.alibaba.dubbo.common.utils.NetUtils;
import com.alibaba.dubbo.common.utils.UrlUtils;
import com.alibaba.dubbo.registry.NotifyListener;
import com.alibaba.dubbo.registry.RegistryService;
import com.alibaba.dubbo.registry.support.AbstractRegistry;
import com.alibaba.dubbo.rpc.RpcContext;

/**
 * SimpleRegistryService
 *
 * <p>An in-memory Dubbo registry.  In addition to the book-keeping done by
 * {@link AbstractRegistry}, it indexes registrations and subscriptions by
 * the remote client address (taken from {@link RpcContext}) so that all of
 * a client's state can be cleaned up in {@link #disconnect()}.
 *
 * @author william.liangf
 */
public class SimpleRegistryService extends AbstractRegistry {

    // URLs registered per remote client address ("host:port" string).
    private final ConcurrentMap<String, Set<URL>> remoteRegistered = new ConcurrentHashMap<String, Set<URL>>();

    // Listeners subscribed per remote client address, keyed by subscribed URL.
    private final ConcurrentMap<String, ConcurrentMap<URL, Set<NotifyListener>>> remoteSubscribed = new ConcurrentHashMap<String, ConcurrentMap<URL, Set<NotifyListener>>>();

    private final static Logger logger = LoggerFactory.getLogger(SimpleRegistryService.class);

    public SimpleRegistryService() {
        // Placeholder registry URL (port 0, file "N/A"); the real address is
        // filled in lazily in subscribe() from the first incoming request.
        super(new URL("dubbo", NetUtils.getLocalHost(), 0, RegistryService.class.getName(), "file", "N/A"));
    }

    /** This registry has no backing store, so it is always available. */
    public boolean isAvailable() {
        return true;
    }

    /**
     * Returns all currently registered URLs matching the given query URL.
     */
    public List<URL> lookup(URL url) {
        List<URL> urls = new ArrayList<URL>();
        for (URL u: getRegistered()) {
            if (UrlUtils.isMatch(url, u)) {
                urls.add(u);
            }
        }
        return urls;
    }

    /**
     * Records the URL both globally (via super) and under the calling
     * client's address, then notifies matching subscribers.
     */
    public void register(URL url) {
        String client = RpcContext.getContext().getRemoteAddressString();
        Set<URL> urls = remoteRegistered.get(client);
        if (urls == null) {
            // putIfAbsent + re-get: tolerates a concurrent first registration
            // from the same client without clobbering the winner's set.
            remoteRegistered.putIfAbsent(client, new ConcurrentHashSet<URL>());
            urls = remoteRegistered.get(client);
        }
        urls.add(url);
        super.register(url);
        registered(url);
    }

    /**
     * Removes the URL from the calling client's set and from the global
     * registry, then notifies matching subscribers of the removal.
     */
    public void unregister(URL url) {
        String client = RpcContext.getContext().getRemoteAddressString();
        Set<URL> urls = remoteRegistered.get(client);
        if (urls != null && urls.size() > 0) {
            urls.remove(url);
        }
        super.unregister(url);
        unregistered(url);
    }

    /**
     * Subscribes the listener to the given URL and immediately pushes the
     * current matching registrations to it (see {@link #subscribed}).
     */
    public void subscribe(URL url, NotifyListener listener) {
        if (getUrl().getPort() == 0) {
            // First request: adopt the real registry URL from the RPC context
            // (the constructor used a placeholder with port 0).
            URL registryUrl = RpcContext.getContext().getUrl();
            if (registryUrl != null && registryUrl.getPort() > 0
                    && RegistryService.class.getName().equals(registryUrl.getPath())) {
                super.setUrl(registryUrl);
                super.register(registryUrl);
            }
        }
        String client = RpcContext.getContext().getRemoteAddressString();
        ConcurrentMap<URL, Set<NotifyListener>> clientListeners = remoteSubscribed.get(client);
        if (clientListeners == null) {
            remoteSubscribed.putIfAbsent(client, new ConcurrentHashMap<URL, Set<NotifyListener>>());
            clientListeners = remoteSubscribed.get(client);
        }
        Set<NotifyListener> listeners = clientListeners.get(url);
        if (listeners == null) {
            clientListeners.putIfAbsent(url, new ConcurrentHashSet<NotifyListener>());
            listeners = clientListeners.get(url);
        }
        listeners.add(listener);
        super.subscribe(url, listener);
        subscribed(url, listener);
    }

    /**
     * Removes the listener; as a side effect also unregisters the URL when
     * it is a concrete-interface URL flagged for registration.
     */
    public void unsubscribe(URL url, NotifyListener listener) {
        if (! Constants.ANY_VALUE.equals(url.getServiceInterface())
                && url.getParameter(Constants.REGISTER_KEY, true)) {
            unregister(url);
        }
        String client = RpcContext.getContext().getRemoteAddressString();
        Map<URL, Set<NotifyListener>> clientListeners = remoteSubscribed.get(client);
        if (clientListeners != null && clientListeners.size() > 0) {
            Set<NotifyListener> listeners = clientListeners.get(url);
            if (listeners != null && listeners.size() > 0) {
                listeners.remove(listener);
            }
        }
    }

    /** Pushes the current lookup result to every subscriber matching url. */
    protected void registered(URL url) {
        for (Map.Entry<URL, Set<NotifyListener>> entry : getSubscribed().entrySet()) {
            URL key = entry.getKey();
            if (UrlUtils.isMatch(key, url)) {
                List<URL> list = lookup(key);
                for (NotifyListener listener : entry.getValue()) {
                    listener.notify(list);
                }
            }
        }
    }

    /** Same notification fan-out as {@link #registered}, after a removal. */
    protected void unregistered(URL url) {
        for (Map.Entry<URL, Set<NotifyListener>> entry : getSubscribed().entrySet()) {
            URL key = entry.getKey();
            if (UrlUtils.isMatch(key, url)) {
                List<URL> list = lookup(key);
                for (NotifyListener listener : entry.getValue()) {
                    listener.notify(list);
                }
            }
        }
    }

    /**
     * Sends the initial snapshot to a new subscriber.  For the wildcard
     * interface ("*") the snapshot is grouped per service and delivered from
     * a dedicated thread, so a slow listener cannot block the caller.
     */
    protected void subscribed(final URL url, final NotifyListener listener) {
        if (Constants.ANY_VALUE.equals(url.getServiceInterface())) {
            new Thread(new Runnable() {
                public void run() {
                    Map<String, List<URL>> map = new HashMap<String, List<URL>>();
                    for (URL u: getRegistered()) {
                        if (UrlUtils.isMatch(url, u)) {
                            String service = u.getServiceInterface();
                            List<URL> list = map.get(service);
                            if (list == null) {
                                list = new ArrayList<URL>();
                                map.put(service, list);
                            }
                            list.add(u);
                        }
                    }
                    for (List<URL> list : map.values()) {
                        try {
                            listener.notify(list);
                        } catch (Throwable e) {
                            // Best-effort notification; a failing listener is
                            // skipped.  NOTE(review): the exception itself is
                            // not logged here, so the stack trace is lost.
                            logger.warn("Discard to notify " + url.getServiceKey() + " to listener " + listener);
                        }
                    }
                }
            }, "DubboMonitorNotifier").start();
        } else {
            List<URL> list = lookup(url);
            try {
                listener.notify(list);
            } catch (Throwable e) {
                // NOTE(review): same as above -- the cause is dropped.
                logger.warn("Discard to notify " + url.getServiceKey() + " to listener " + listener);
            }
        }
    }

    /**
     * Tears down everything the disconnecting client had registered or
     * subscribed, so stale providers/listeners do not linger.
     */
    public void disconnect() {
        String client = RpcContext.getContext().getRemoteAddressString();
        if (logger.isInfoEnabled()) {
            logger.info("Disconnected " + client);
        }
        Set<URL> urls = remoteRegistered.get(client);
        if (urls != null && urls.size() > 0) {
            for (URL url : urls) {
                unregister(url);
            }
        }
        Map<URL, Set<NotifyListener>> listeners = remoteSubscribed.get(client);
        if (listeners != null && listeners.size() > 0) {
            for (Map.Entry<URL, Set<NotifyListener>> entry : listeners.entrySet()) {
                URL url = entry.getKey();
                for (NotifyListener listener : entry.getValue()) {
                    unsubscribe(url, listener);
                }
            }
        }
    }

}
mit
cowthan/JavaAyo
src-netty/io/netty/example/worldclock/WorldClockProtocol.java
118030
/* * Copyright 2013 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: src/main/java/io/netty/example/worldclock/WorldClockProtocol.proto package io.netty.example.worldclock; @SuppressWarnings("all") public final class WorldClockProtocol { private WorldClockProtocol() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } /** * Protobuf enum {@code io.netty.example.worldclock.Continent} */ public enum Continent implements com.google.protobuf.ProtocolMessageEnum { /** * <code>AFRICA = 0;</code> */ AFRICA(0, 0), /** * <code>AMERICA = 1;</code> */ AMERICA(1, 1), /** * <code>ANTARCTICA = 2;</code> */ ANTARCTICA(2, 2), /** * <code>ARCTIC = 3;</code> */ ARCTIC(3, 3), /** * <code>ASIA = 4;</code> */ ASIA(4, 4), /** * <code>ATLANTIC = 5;</code> */ ATLANTIC(5, 5), /** * <code>AUSTRALIA = 6;</code> */ AUSTRALIA(6, 6), /** * <code>EUROPE = 7;</code> */ EUROPE(7, 7), /** * <code>INDIAN = 8;</code> */ INDIAN(8, 8), /** * <code>MIDEAST = 9;</code> */ MIDEAST(9, 9), /** * <code>PACIFIC = 10;</code> */ PACIFIC(10, 10), ; /** * <code>AFRICA = 0;</code> */ public static final int AFRICA_VALUE = 0; /** * <code>AMERICA = 1;</code> */ public static final int AMERICA_VALUE = 1; /** * <code>ANTARCTICA = 2;</code> */ public static final int ANTARCTICA_VALUE = 2; /** * <code>ARCTIC = 3;</code> */ public static final int ARCTIC_VALUE = 3; /** 
* <code>ASIA = 4;</code> */ public static final int ASIA_VALUE = 4; /** * <code>ATLANTIC = 5;</code> */ public static final int ATLANTIC_VALUE = 5; /** * <code>AUSTRALIA = 6;</code> */ public static final int AUSTRALIA_VALUE = 6; /** * <code>EUROPE = 7;</code> */ public static final int EUROPE_VALUE = 7; /** * <code>INDIAN = 8;</code> */ public static final int INDIAN_VALUE = 8; /** * <code>MIDEAST = 9;</code> */ public static final int MIDEAST_VALUE = 9; /** * <code>PACIFIC = 10;</code> */ public static final int PACIFIC_VALUE = 10; public final int getNumber() { return value; } public static Continent valueOf(int value) { switch (value) { case 0: return AFRICA; case 1: return AMERICA; case 2: return ANTARCTICA; case 3: return ARCTIC; case 4: return ASIA; case 5: return ATLANTIC; case 6: return AUSTRALIA; case 7: return EUROPE; case 8: return INDIAN; case 9: return MIDEAST; case 10: return PACIFIC; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap<Continent> internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap<Continent> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<Continent>() { public Continent findValueByNumber(int number) { return Continent.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.getDescriptor().getEnumTypes().get(0); } private static final Continent[] VALUES = values(); public static Continent valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } 
return VALUES[desc.getIndex()]; } private final int index; private final int value; private Continent(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:io.netty.example.worldclock.Continent) } /** * Protobuf enum {@code io.netty.example.worldclock.DayOfWeek} */ public enum DayOfWeek implements com.google.protobuf.ProtocolMessageEnum { /** * <code>SUNDAY = 1;</code> */ SUNDAY(0, 1), /** * <code>MONDAY = 2;</code> */ MONDAY(1, 2), /** * <code>TUESDAY = 3;</code> */ TUESDAY(2, 3), /** * <code>WEDNESDAY = 4;</code> */ WEDNESDAY(3, 4), /** * <code>THURSDAY = 5;</code> */ THURSDAY(4, 5), /** * <code>FRIDAY = 6;</code> */ FRIDAY(5, 6), /** * <code>SATURDAY = 7;</code> */ SATURDAY(6, 7), ; /** * <code>SUNDAY = 1;</code> */ public static final int SUNDAY_VALUE = 1; /** * <code>MONDAY = 2;</code> */ public static final int MONDAY_VALUE = 2; /** * <code>TUESDAY = 3;</code> */ public static final int TUESDAY_VALUE = 3; /** * <code>WEDNESDAY = 4;</code> */ public static final int WEDNESDAY_VALUE = 4; /** * <code>THURSDAY = 5;</code> */ public static final int THURSDAY_VALUE = 5; /** * <code>FRIDAY = 6;</code> */ public static final int FRIDAY_VALUE = 6; /** * <code>SATURDAY = 7;</code> */ public static final int SATURDAY_VALUE = 7; public final int getNumber() { return value; } public static DayOfWeek valueOf(int value) { switch (value) { case 1: return SUNDAY; case 2: return MONDAY; case 3: return TUESDAY; case 4: return WEDNESDAY; case 5: return THURSDAY; case 6: return FRIDAY; case 7: return SATURDAY; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap<DayOfWeek> internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap<DayOfWeek> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<DayOfWeek>() { public DayOfWeek findValueByNumber(int number) { return DayOfWeek.valueOf(number); } }; public final 
com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.getDescriptor().getEnumTypes().get(1); } private static final DayOfWeek[] VALUES = values(); public static DayOfWeek valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private DayOfWeek(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:io.netty.example.worldclock.DayOfWeek) } public interface LocationOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .io.netty.example.worldclock.Continent continent = 1; /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ boolean hasContinent(); /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ io.netty.example.worldclock.WorldClockProtocol.Continent getContinent(); // required string city = 2; /** * <code>required string city = 2;</code> */ boolean hasCity(); /** * <code>required string city = 2;</code> */ java.lang.String getCity(); /** * <code>required string city = 2;</code> */ com.google.protobuf.ByteString getCityBytes(); } /** * Protobuf type {@code io.netty.example.worldclock.Location} */ public static final class Location extends com.google.protobuf.GeneratedMessage implements LocationOrBuilder { // Use Location.newBuilder() to construct. 
// NOTE(review): protoc-generated message body — do not hand-edit; regenerate
// from the .proto. The constructor below parses the wire format directly:
// tag 8 = field 1 (continent enum, unknown values kept as unknown fields),
// tag 18 = field 2 (city bytes); bitField0_ tracks field presence.
private Location(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private Location(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final Location defaultInstance; public static Location getDefaultInstance() { return defaultInstance; } public Location getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Location( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); io.netty.example.worldclock.WorldClockProtocol.Continent value = io.netty.example.worldclock.WorldClockProtocol.Continent.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; continent_ = value; } break; } case 18: { bitField0_ |= 0x00000002; city_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return
io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Location_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Location_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.Location.class, io.netty.example.worldclock.WorldClockProtocol.Location.Builder.class); } public static com.google.protobuf.Parser<Location> PARSER = new com.google.protobuf.AbstractParser<Location>() { public Location parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new Location(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<Location> getParserForType() { return PARSER; } private int bitField0_;
// required .io.netty.example.worldclock.Continent continent = 1;
public static final int CONTINENT_FIELD_NUMBER = 1; private io.netty.example.worldclock.WorldClockProtocol.Continent continent_; /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public boolean hasContinent() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Continent getContinent() { return continent_; }
// required string city = 2;
// (city_ caches either a String or a ByteString; getCity()/getCityBytes()
// convert lazily and memoize the converted form.)
public static final int CITY_FIELD_NUMBER = 2; private java.lang.Object city_; /** * <code>required string city = 2;</code> */ public boolean hasCity() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required string city = 2;</code> */ public java.lang.String getCity() { java.lang.Object ref = city_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { city_ = s; } return s; } } /** * <code>required string city = 2;</code> */ public com.google.protobuf.ByteString getCityBytes() { java.lang.Object ref = city_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); city_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { continent_ = io.netty.example.worldclock.WorldClockProtocol.Continent.AFRICA; city_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasContinent()) { memoizedIsInitialized = 0; return false; } if (!hasCity()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, continent_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getCityBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, continent_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getCityBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException {
return super.writeReplace(); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( com.google.protobuf.CodedInputStream input) throws
java.io.IOException { return PARSER.parseFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.Location parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(io.netty.example.worldclock.WorldClockProtocol.Location prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code io.netty.example.worldclock.Location} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Location_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Location_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.Location.class, io.netty.example.worldclock.WorldClockProtocol.Location.Builder.class); }
// Construct using io.netty.example.worldclock.WorldClockProtocol.Location.newBuilder()
private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); continent_ = io.netty.example.worldclock.WorldClockProtocol.Continent.AFRICA; bitField0_ = (bitField0_ & ~0x00000001); city_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Location_descriptor; } public io.netty.example.worldclock.WorldClockProtocol.Location getDefaultInstanceForType() { return io.netty.example.worldclock.WorldClockProtocol.Location.getDefaultInstance(); } public io.netty.example.worldclock.WorldClockProtocol.Location build() { io.netty.example.worldclock.WorldClockProtocol.Location result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public io.netty.example.worldclock.WorldClockProtocol.Location buildPartial() { io.netty.example.worldclock.WorldClockProtocol.Location result = new io.netty.example.worldclock.WorldClockProtocol.Location(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.continent_ = continent_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.city_ = city_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.netty.example.worldclock.WorldClockProtocol.Location) { return mergeFrom((io.netty.example.worldclock.WorldClockProtocol.Location)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.netty.example.worldclock.WorldClockProtocol.Location other) { if (other ==
io.netty.example.worldclock.WorldClockProtocol.Location.getDefaultInstance()) return this; if (other.hasContinent()) { setContinent(other.getContinent()); } if (other.hasCity()) { bitField0_ |= 0x00000002; city_ = other.city_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasContinent()) { return false; } if (!hasCity()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.netty.example.worldclock.WorldClockProtocol.Location parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.netty.example.worldclock.WorldClockProtocol.Location) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_;
// required .io.netty.example.worldclock.Continent continent = 1;
private io.netty.example.worldclock.WorldClockProtocol.Continent continent_ = io.netty.example.worldclock.WorldClockProtocol.Continent.AFRICA; /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public boolean hasContinent() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Continent getContinent() { return continent_; } /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public Builder setContinent(io.netty.example.worldclock.WorldClockProtocol.Continent value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; continent_ = value; onChanged(); return this; } /** * <code>required .io.netty.example.worldclock.Continent continent = 1;</code> */ public
Builder clearContinent() { bitField0_ = (bitField0_ & ~0x00000001); continent_ = io.netty.example.worldclock.WorldClockProtocol.Continent.AFRICA; onChanged(); return this; }
// required string city = 2;
private java.lang.Object city_ = ""; /** * <code>required string city = 2;</code> */ public boolean hasCity() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required string city = 2;</code> */ public java.lang.String getCity() { java.lang.Object ref = city_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); city_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>required string city = 2;</code> */ public com.google.protobuf.ByteString getCityBytes() { java.lang.Object ref = city_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); city_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>required string city = 2;</code> */ public Builder setCity( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; city_ = value; onChanged(); return this; } /** * <code>required string city = 2;</code> */ public Builder clearCity() { bitField0_ = (bitField0_ & ~0x00000002); city_ = getDefaultInstance().getCity(); onChanged(); return this; } /** * <code>required string city = 2;</code> */ public Builder setCityBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; city_ = value; onChanged(); return this; }
// @@protoc_insertion_point(builder_scope:io.netty.example.worldclock.Location)
} static { defaultInstance = new Location(true); defaultInstance.initFields(); }
// @@protoc_insertion_point(class_scope:io.netty.example.worldclock.Location)
}
// --- LocationsOrBuilder: generated read-only accessor interface for the
// Locations message (one repeated Location field).
public interface LocationsOrBuilder extends com.google.protobuf.MessageOrBuilder {
// repeated .io.netty.example.worldclock.Location location = 1;
/** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> getLocationList(); /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ io.netty.example.worldclock.WorldClockProtocol.Location getLocation(int index); /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ int getLocationCount(); /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ java.util.List<? extends io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder> getLocationOrBuilderList(); /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder getLocationOrBuilder( int index); }
// --- Locations: generated message class wrapping a repeated Location field.
/** * Protobuf type {@code io.netty.example.worldclock.Locations} */ public static final class Locations extends com.google.protobuf.GeneratedMessage implements LocationsOrBuilder {
// Use Locations.newBuilder() to construct.
// NOTE(review): protoc-generated message body — do not hand-edit; regenerate
// from the .proto. The wire-parsing constructor below collects tag 10
// (field 1, repeated Location) into a temporary ArrayList, then freezes it
// as an unmodifiable list in the finally block.
private Locations(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private Locations(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final Locations defaultInstance; public static Locations getDefaultInstance() { return defaultInstance; } public Locations getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Locations( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { location_ = new java.util.ArrayList<io.netty.example.worldclock.WorldClockProtocol.Location>(); mutable_bitField0_ |= 0x00000001; } location_.add(input.readMessage(io.netty.example.worldclock.WorldClockProtocol.Location.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { location_ = java.util.Collections.unmodifiableList(location_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final
com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.Locations.class, io.netty.example.worldclock.WorldClockProtocol.Locations.Builder.class); } public static com.google.protobuf.Parser<Locations> PARSER = new com.google.protobuf.AbstractParser<Locations>() { public Locations parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new Locations(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<Locations> getParserForType() { return PARSER; }
// repeated .io.netty.example.worldclock.Location location = 1;
public static final int LOCATION_FIELD_NUMBER = 1; private java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> location_; /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> getLocationList() { return location_; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public java.util.List<? extends io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder> getLocationOrBuilderList() { return location_; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public int getLocationCount() { return location_.size(); } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Location getLocation(int index) { return location_.get(index); } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder getLocationOrBuilder( int index) { return location_.get(index); } private void initFields() { location_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getLocationCount(); i++) { if (!getLocation(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < location_.size(); i++) { output.writeMessage(1, location_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < location_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, location_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static
io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(io.netty.example.worldclock.WorldClockProtocol.Locations prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code io.netty.example.worldclock.Locations} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements io.netty.example.worldclock.WorldClockProtocol.LocationsOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.Locations.class, io.netty.example.worldclock.WorldClockProtocol.Locations.Builder.class); }
// Construct using io.netty.example.worldclock.WorldClockProtocol.Locations.newBuilder()
private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getLocationFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (locationBuilder_ == null) { location_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { locationBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_descriptor; } public io.netty.example.worldclock.WorldClockProtocol.Locations getDefaultInstanceForType() { return io.netty.example.worldclock.WorldClockProtocol.Locations.getDefaultInstance(); } public io.netty.example.worldclock.WorldClockProtocol.Locations build() { io.netty.example.worldclock.WorldClockProtocol.Locations result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public io.netty.example.worldclock.WorldClockProtocol.Locations buildPartial() { io.netty.example.worldclock.WorldClockProtocol.Locations result = new io.netty.example.worldclock.WorldClockProtocol.Locations(this); int from_bitField0_ = bitField0_; if (locationBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { location_ = java.util.Collections.unmodifiableList(location_); bitField0_ = (bitField0_ & ~0x00000001); } result.location_ = location_; } else { result.location_ = locationBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.netty.example.worldclock.WorldClockProtocol.Locations) { return mergeFrom((io.netty.example.worldclock.WorldClockProtocol.Locations)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.netty.example.worldclock.WorldClockProtocol.Locations other) { if (other ==
io.netty.example.worldclock.WorldClockProtocol.Locations.getDefaultInstance()) return this; if (locationBuilder_ == null) { if (!other.location_.isEmpty()) { if (location_.isEmpty()) { location_ = other.location_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureLocationIsMutable(); location_.addAll(other.location_); } onChanged(); } } else { if (!other.location_.isEmpty()) { if (locationBuilder_.isEmpty()) { locationBuilder_.dispose(); locationBuilder_ = null; location_ = other.location_; bitField0_ = (bitField0_ & ~0x00000001); locationBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getLocationFieldBuilder() : null; } else { locationBuilder_.addAllMessages(other.location_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getLocationCount(); i++) { if (!getLocation(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.netty.example.worldclock.WorldClockProtocol.Locations parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.netty.example.worldclock.WorldClockProtocol.Locations) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_;
// repeated .io.netty.example.worldclock.Location location = 1;
// (The builder keeps either a plain list in location_ or, once field builders
// are in use, delegates everything to locationBuilder_.)
private java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> location_ = java.util.Collections.emptyList(); private void ensureLocationIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { location_ = new java.util.ArrayList<io.netty.example.worldclock.WorldClockProtocol.Location>(location_); bitField0_ |= 0x00000001; } } private
com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.Location, io.netty.example.worldclock.WorldClockProtocol.Location.Builder, io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder> locationBuilder_; /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> getLocationList() { if (locationBuilder_ == null) { return java.util.Collections.unmodifiableList(location_); } else { return locationBuilder_.getMessageList(); } } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public int getLocationCount() { if (locationBuilder_ == null) { return location_.size(); } else { return locationBuilder_.getCount(); } } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Location getLocation(int index) { if (locationBuilder_ == null) { return location_.get(index); } else { return locationBuilder_.getMessage(index); } } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder setLocation( int index, io.netty.example.worldclock.WorldClockProtocol.Location value) { if (locationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocationIsMutable(); location_.set(index, value); onChanged(); } else { locationBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder setLocation( int index, io.netty.example.worldclock.WorldClockProtocol.Location.Builder builderForValue) { if (locationBuilder_ == null) { ensureLocationIsMutable(); location_.set(index, builderForValue.build()); onChanged(); } else { locationBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public
Builder addLocation(io.netty.example.worldclock.WorldClockProtocol.Location value) { if (locationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocationIsMutable(); location_.add(value); onChanged(); } else { locationBuilder_.addMessage(value); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder addLocation( int index, io.netty.example.worldclock.WorldClockProtocol.Location value) { if (locationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocationIsMutable(); location_.add(index, value); onChanged(); } else { locationBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder addLocation( io.netty.example.worldclock.WorldClockProtocol.Location.Builder builderForValue) { if (locationBuilder_ == null) { ensureLocationIsMutable(); location_.add(builderForValue.build()); onChanged(); } else { locationBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder addLocation( int index, io.netty.example.worldclock.WorldClockProtocol.Location.Builder builderForValue) { if (locationBuilder_ == null) { ensureLocationIsMutable(); location_.add(index, builderForValue.build()); onChanged(); } else { locationBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder addAllLocation( java.lang.Iterable<?
extends io.netty.example.worldclock.WorldClockProtocol.Location> values) { if (locationBuilder_ == null) { ensureLocationIsMutable(); super.addAll(values, location_); onChanged(); } else { locationBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder clearLocation() { if (locationBuilder_ == null) { location_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { locationBuilder_.clear(); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public Builder removeLocation(int index) { if (locationBuilder_ == null) { ensureLocationIsMutable(); location_.remove(index); onChanged(); } else { locationBuilder_.remove(index); } return this; } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Location.Builder getLocationBuilder( int index) { return getLocationFieldBuilder().getBuilder(index); } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder getLocationOrBuilder( int index) { if (locationBuilder_ == null) { return location_.get(index); } else { return locationBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public java.util.List<?
extends io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder> getLocationOrBuilderList() { if (locationBuilder_ != null) { return locationBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(location_); } } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Location.Builder addLocationBuilder() { return getLocationFieldBuilder().addBuilder( io.netty.example.worldclock.WorldClockProtocol.Location.getDefaultInstance()); } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.Location.Builder addLocationBuilder( int index) { return getLocationFieldBuilder().addBuilder( index, io.netty.example.worldclock.WorldClockProtocol.Location.getDefaultInstance()); } /** * <code>repeated .io.netty.example.worldclock.Location location = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location.Builder> getLocationBuilderList() { return getLocationFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.Location, io.netty.example.worldclock.WorldClockProtocol.Location.Builder, io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder> getLocationFieldBuilder() { if (locationBuilder_ == null) { locationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.Location, io.netty.example.worldclock.WorldClockProtocol.Location.Builder, io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder>( location_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); location_ = null; } return locationBuilder_; } // @@protoc_insertion_point(builder_scope:io.netty.example.worldclock.Locations) } static { defaultInstance = new Locations(true); defaultInstance.initFields(); } // 
@@protoc_insertion_point(class_scope:io.netty.example.worldclock.Locations) } public interface LocalTimeOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint32 year = 1; /** * <code>required uint32 year = 1;</code> */ boolean hasYear(); /** * <code>required uint32 year = 1;</code> */ int getYear(); // required uint32 month = 2; /** * <code>required uint32 month = 2;</code> */ boolean hasMonth(); /** * <code>required uint32 month = 2;</code> */ int getMonth(); // required uint32 dayOfMonth = 4; /** * <code>required uint32 dayOfMonth = 4;</code> */ boolean hasDayOfMonth(); /** * <code>required uint32 dayOfMonth = 4;</code> */ int getDayOfMonth(); // required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5; /** * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code> */ boolean hasDayOfWeek(); /** * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code> */ io.netty.example.worldclock.WorldClockProtocol.DayOfWeek getDayOfWeek(); // required uint32 hour = 6; /** * <code>required uint32 hour = 6;</code> */ boolean hasHour(); /** * <code>required uint32 hour = 6;</code> */ int getHour(); // required uint32 minute = 7; /** * <code>required uint32 minute = 7;</code> */ boolean hasMinute(); /** * <code>required uint32 minute = 7;</code> */ int getMinute(); // required uint32 second = 8; /** * <code>required uint32 second = 8;</code> */ boolean hasSecond(); /** * <code>required uint32 second = 8;</code> */ int getSecond(); } /** * Protobuf type {@code io.netty.example.worldclock.LocalTime} */ public static final class LocalTime extends com.google.protobuf.GeneratedMessage implements LocalTimeOrBuilder { // Use LocalTime.newBuilder() to construct. 
// ---------------------------------------------------------------------------
// NOTE(review): Everything below is protoc-generated code for the LocalTime
// message (seven required fields: year, month, dayOfMonth, dayOfWeek, hour,
// minute, second). Do not edit by hand — regenerate from the .proto file.
// Comments added here are for readers only; code tokens are unchanged.
// ---------------------------------------------------------------------------

// Builder-based construction path; adopts the builder's unknown fields.
private LocalTime(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}

// Used only to create the singleton default instance (no parsing performed).
private LocalTime(boolean noInit) {
  this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance();
}

private static final LocalTime defaultInstance;
public static LocalTime getDefaultInstance() {
  return defaultInstance;
}

public LocalTime getDefaultInstanceForType() {
  return defaultInstance;
}

// Fields that arrived on the wire but are not part of this message's schema.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
  return this.unknownFields;
}

// Wire-format parsing constructor: reads tag/value pairs until end of input
// (tag 0). Tags 8/16/32/40/48/56/64 are field numbers 1,2,4,5,6,7,8 encoded
// as varints; an unrecognized DayOfWeek enum value is preserved in
// unknownFields rather than dropped.
private LocalTime(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 8: {
          bitField0_ |= 0x00000001;
          year_ = input.readUInt32();
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          month_ = input.readUInt32();
          break;
        }
        case 32: {
          bitField0_ |= 0x00000004;
          dayOfMonth_ = input.readUInt32();
          break;
        }
        case 40: {
          int rawValue = input.readEnum();
          io.netty.example.worldclock.WorldClockProtocol.DayOfWeek value =
              io.netty.example.worldclock.WorldClockProtocol.DayOfWeek.valueOf(rawValue);
          if (value == null) {
            // Unknown enum ordinal: keep the raw varint so round-tripping is lossless.
            unknownFields.mergeVarintField(5, rawValue);
          } else {
            bitField0_ |= 0x00000008;
            dayOfWeek_ = value;
          }
          break;
        }
        case 48: {
          bitField0_ |= 0x00000010;
          hour_ = input.readUInt32();
          break;
        }
        case 56: {
          bitField0_ |= 0x00000020;
          minute_ = input.readUInt32();
          break;
        }
        case 64: {
          bitField0_ |= 0x00000040;
          second_ = input.readUInt32();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Always freeze whatever was parsed, even on failure, so the partial
    // message attached to the exception is immutable.
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}

public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTime_descriptor;
}

protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTime_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          io.netty.example.worldclock.WorldClockProtocol.LocalTime.class,
          io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder.class);
}

// Shared stateless parser; each parse call builds a new LocalTime instance.
public static com.google.protobuf.Parser<LocalTime> PARSER =
    new com.google.protobuf.AbstractParser<LocalTime>() {
  public LocalTime parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new LocalTime(input, extensionRegistry);
  }
};

@java.lang.Override
public com.google.protobuf.Parser<LocalTime> getParserForType() {
  return PARSER;
}

// Presence bits: one bit per field (0x01 year .. 0x40 second).
private int bitField0_;

// required uint32 year = 1;
public static final int YEAR_FIELD_NUMBER = 1;
private int year_;
/**
 * <code>required uint32 year = 1;</code>
 */
public boolean hasYear() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>required uint32 year = 1;</code>
 */
public int getYear() {
  return year_;
}

// required uint32 month = 2;
public static final int MONTH_FIELD_NUMBER = 2;
private int month_;
/**
 * <code>required uint32 month = 2;</code>
 */
public boolean hasMonth() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <code>required uint32 month = 2;</code>
 */
public int getMonth() {
  return month_;
}

// required uint32 dayOfMonth = 4;
public static final int DAYOFMONTH_FIELD_NUMBER = 4;
private int dayOfMonth_;
/**
 * <code>required uint32 dayOfMonth = 4;</code>
 */
public boolean hasDayOfMonth() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * <code>required uint32 dayOfMonth = 4;</code>
 */
public int getDayOfMonth() {
  return dayOfMonth_;
}

// required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;
public static final int DAYOFWEEK_FIELD_NUMBER = 5;
private io.netty.example.worldclock.WorldClockProtocol.DayOfWeek dayOfWeek_;
/**
 * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
 */
public boolean hasDayOfWeek() {
  return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
 */
public io.netty.example.worldclock.WorldClockProtocol.DayOfWeek getDayOfWeek() {
  return dayOfWeek_;
}

// required uint32 hour = 6;
public static final int HOUR_FIELD_NUMBER = 6;
private int hour_;
/**
 * <code>required uint32 hour = 6;</code>
 */
public boolean hasHour() {
  return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * <code>required uint32 hour = 6;</code>
 */
public int getHour() {
  return hour_;
}

// required uint32 minute = 7;
public static final int MINUTE_FIELD_NUMBER = 7;
private int minute_;
/**
 * <code>required uint32 minute = 7;</code>
 */
public boolean hasMinute() {
  return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
 * <code>required uint32 minute = 7;</code>
 */
public int getMinute() {
  return minute_;
}

// required uint32 second = 8;
public static final int SECOND_FIELD_NUMBER = 8;
private int second_;
/**
 * <code>required uint32 second = 8;</code>
 */
public boolean hasSecond() {
  return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
 * <code>required uint32 second = 8;</code>
 */
public int getSecond() {
  return second_;
}

// Default values for all fields; dayOfWeek defaults to the first enum value.
private void initFields() {
  year_ = 0;
  month_ = 0;
  dayOfMonth_ = 0;
  dayOfWeek_ = io.netty.example.worldclock.WorldClockProtocol.DayOfWeek.SUNDAY;
  hour_ = 0;
  minute_ = 0;
  second_ = 0;
}

// Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;

  // Every field is `required` in the schema, so all seven must be present.
  if (!hasYear()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasMonth()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasDayOfMonth()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasDayOfWeek()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasHour()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasMinute()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasSecond()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}

// Serializes only the fields whose presence bits are set, then any unknowns.
public void writeTo(com.google.protobuf.CodedOutputStream output)
                    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeUInt32(1, year_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt32(2, month_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeUInt32(4, dayOfMonth_);
  }
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    output.writeEnum(5, dayOfWeek_.getNumber());
  }
  if (((bitField0_ & 0x00000010) == 0x00000010)) {
    output.writeUInt32(6, hour_);
  }
  if (((bitField0_ & 0x00000020) == 0x00000020)) {
    output.writeUInt32(7, minute_);
  }
  if (((bitField0_ & 0x00000040) == 0x00000040)) {
    output.writeUInt32(8, second_);
  }
  getUnknownFields().writeTo(output);
}

// Memoized wire size: -1 until first computed.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(1, year_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(2, month_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(4, dayOfMonth_);
  }
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    size += com.google.protobuf.CodedOutputStream
      .computeEnumSize(5, dayOfWeek_.getNumber());
  }
  if (((bitField0_ & 0x00000010) == 0x00000010)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(6, hour_);
  }
  if (((bitField0_ & 0x00000020) == 0x00000020)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(7, minute_);
  }
  if (((bitField0_ & 0x00000040) == 0x00000040)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt32Size(8, second_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}

private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}

// Static parse helpers — all delegate to the shared PARSER.
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.LocalTime parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}

public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(io.netty.example.worldclock.WorldClockProtocol.LocalTime prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Protobuf type {@code io.netty.example.worldclock.LocalTime}
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTime_descriptor;
  }

  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTime_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            io.netty.example.worldclock.WorldClockProtocol.LocalTime.class,
            io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder.class);
  }

  // Construct using io.netty.example.worldclock.WorldClockProtocol.LocalTime.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }

  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }

  // Resets every field to its default and clears all presence bits.
  public Builder clear() {
    super.clear();
    year_ = 0;
    bitField0_ = (bitField0_ & ~0x00000001);
    month_ = 0;
    bitField0_ = (bitField0_ & ~0x00000002);
    dayOfMonth_ = 0;
    bitField0_ = (bitField0_ & ~0x00000004);
    dayOfWeek_ = io.netty.example.worldclock.WorldClockProtocol.DayOfWeek.SUNDAY;
    bitField0_ = (bitField0_ & ~0x00000008);
    hour_ = 0;
    bitField0_ = (bitField0_ & ~0x00000010);
    minute_ = 0;
    bitField0_ = (bitField0_ & ~0x00000020);
    second_ = 0;
    bitField0_ = (bitField0_ & ~0x00000040);
    return this;
  }

  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }

  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTime_descriptor;
  }

  public io.netty.example.worldclock.WorldClockProtocol.LocalTime getDefaultInstanceForType() {
    return io.netty.example.worldclock.WorldClockProtocol.LocalTime.getDefaultInstance();
  }

  // Like buildPartial() but throws if any required field is missing.
  public io.netty.example.worldclock.WorldClockProtocol.LocalTime build() {
    io.netty.example.worldclock.WorldClockProtocol.LocalTime result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  // Copies builder state (values + presence bits) into a new message.
  public io.netty.example.worldclock.WorldClockProtocol.LocalTime buildPartial() {
    io.netty.example.worldclock.WorldClockProtocol.LocalTime result = new io.netty.example.worldclock.WorldClockProtocol.LocalTime(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.year_ = year_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    result.month_ = month_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.dayOfMonth_ = dayOfMonth_;
    if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
      to_bitField0_ |= 0x00000008;
    }
    result.dayOfWeek_ = dayOfWeek_;
    if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
      to_bitField0_ |= 0x00000010;
    }
    result.hour_ = hour_;
    if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
      to_bitField0_ |= 0x00000020;
    }
    result.minute_ = minute_;
    if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
      to_bitField0_ |= 0x00000040;
    }
    result.second_ = second_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }

  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof io.netty.example.worldclock.WorldClockProtocol.LocalTime) {
      return mergeFrom((io.netty.example.worldclock.WorldClockProtocol.LocalTime)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  // Copies only the fields present in `other`; untouched fields keep their values.
  public Builder mergeFrom(io.netty.example.worldclock.WorldClockProtocol.LocalTime other) {
    if (other == io.netty.example.worldclock.WorldClockProtocol.LocalTime.getDefaultInstance()) return this;
    if (other.hasYear()) {
      setYear(other.getYear());
    }
    if (other.hasMonth()) {
      setMonth(other.getMonth());
    }
    if (other.hasDayOfMonth()) {
      setDayOfMonth(other.getDayOfMonth());
    }
    if (other.hasDayOfWeek()) {
      setDayOfWeek(other.getDayOfWeek());
    }
    if (other.hasHour()) {
      setHour(other.getHour());
    }
    if (other.hasMinute()) {
      setMinute(other.getMinute());
    }
    if (other.hasSecond()) {
      setSecond(other.getSecond());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }

  public final boolean isInitialized() {
    if (!hasYear()) {
      return false;
    }
    if (!hasMonth()) {
      return false;
    }
    if (!hasDayOfMonth()) {
      return false;
    }
    if (!hasDayOfWeek()) {
      return false;
    }
    if (!hasHour()) {
      return false;
    }
    if (!hasMinute()) {
      return false;
    }
    if (!hasSecond()) {
      return false;
    }
    return true;
  }

  // Parses from a stream and merges; on failure still merges any partially
  // parsed message (attached to the exception) before rethrowing.
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    io.netty.example.worldclock.WorldClockProtocol.LocalTime parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (io.netty.example.worldclock.WorldClockProtocol.LocalTime) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  // Builder-side presence bits (same layout as the message's bitField0_).
  private int bitField0_;

  // required uint32 year = 1;
  private int year_ ;
  /**
   * <code>required uint32 year = 1;</code>
   */
  public boolean hasYear() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * <code>required uint32 year = 1;</code>
   */
  public int getYear() {
    return year_;
  }
  /**
   * <code>required uint32 year = 1;</code>
   */
  public Builder setYear(int value) {
    bitField0_ |= 0x00000001;
    year_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 year = 1;</code>
   */
  public Builder clearYear() {
    bitField0_ = (bitField0_ & ~0x00000001);
    year_ = 0;
    onChanged();
    return this;
  }

  // required uint32 month = 2;
  private int month_ ;
  /**
   * <code>required uint32 month = 2;</code>
   */
  public boolean hasMonth() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * <code>required uint32 month = 2;</code>
   */
  public int getMonth() {
    return month_;
  }
  /**
   * <code>required uint32 month = 2;</code>
   */
  public Builder setMonth(int value) {
    bitField0_ |= 0x00000002;
    month_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 month = 2;</code>
   */
  public Builder clearMonth() {
    bitField0_ = (bitField0_ & ~0x00000002);
    month_ = 0;
    onChanged();
    return this;
  }

  // required uint32 dayOfMonth = 4;
  private int dayOfMonth_ ;
  /**
   * <code>required uint32 dayOfMonth = 4;</code>
   */
  public boolean hasDayOfMonth() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * <code>required uint32 dayOfMonth = 4;</code>
   */
  public int getDayOfMonth() {
    return dayOfMonth_;
  }
  /**
   * <code>required uint32 dayOfMonth = 4;</code>
   */
  public Builder setDayOfMonth(int value) {
    bitField0_ |= 0x00000004;
    dayOfMonth_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 dayOfMonth = 4;</code>
   */
  public Builder clearDayOfMonth() {
    bitField0_ = (bitField0_ & ~0x00000004);
    dayOfMonth_ = 0;
    onChanged();
    return this;
  }

  // required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;
  private io.netty.example.worldclock.WorldClockProtocol.DayOfWeek dayOfWeek_ = io.netty.example.worldclock.WorldClockProtocol.DayOfWeek.SUNDAY;
  /**
   * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
   */
  public boolean hasDayOfWeek() {
    return ((bitField0_ & 0x00000008) == 0x00000008);
  }
  /**
   * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
   */
  public io.netty.example.worldclock.WorldClockProtocol.DayOfWeek getDayOfWeek() {
    return dayOfWeek_;
  }
  /**
   * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
   */
  public Builder setDayOfWeek(io.netty.example.worldclock.WorldClockProtocol.DayOfWeek value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000008;
    dayOfWeek_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required .io.netty.example.worldclock.DayOfWeek dayOfWeek = 5;</code>
   */
  public Builder clearDayOfWeek() {
    bitField0_ = (bitField0_ & ~0x00000008);
    dayOfWeek_ = io.netty.example.worldclock.WorldClockProtocol.DayOfWeek.SUNDAY;
    onChanged();
    return this;
  }

  // required uint32 hour = 6;
  private int hour_ ;
  /**
   * <code>required uint32 hour = 6;</code>
   */
  public boolean hasHour() {
    return ((bitField0_ & 0x00000010) == 0x00000010);
  }
  /**
   * <code>required uint32 hour = 6;</code>
   */
  public int getHour() {
    return hour_;
  }
  /**
   * <code>required uint32 hour = 6;</code>
   */
  public Builder setHour(int value) {
    bitField0_ |= 0x00000010;
    hour_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 hour = 6;</code>
   */
  public Builder clearHour() {
    bitField0_ = (bitField0_ & ~0x00000010);
    hour_ = 0;
    onChanged();
    return this;
  }

  // required uint32 minute = 7;
  private int minute_ ;
  /**
   * <code>required uint32 minute = 7;</code>
   */
  public boolean hasMinute() {
    return ((bitField0_ & 0x00000020) == 0x00000020);
  }
  /**
   * <code>required uint32 minute = 7;</code>
   */
  public int getMinute() {
    return minute_;
  }
  /**
   * <code>required uint32 minute = 7;</code>
   */
  public Builder setMinute(int value) {
    bitField0_ |= 0x00000020;
    minute_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 minute = 7;</code>
   */
  public Builder clearMinute() {
    bitField0_ = (bitField0_ & ~0x00000020);
    minute_ = 0;
    onChanged();
    return this;
  }

  // required uint32 second = 8;
  private int second_ ;
  /**
   * <code>required uint32 second = 8;</code>
   */
  public boolean hasSecond() {
    return ((bitField0_ & 0x00000040) == 0x00000040);
  }
  /**
   * <code>required uint32 second = 8;</code>
   */
  public int getSecond() {
    return second_;
  }
  /**
   * <code>required uint32 second = 8;</code>
   */
  public Builder setSecond(int value) {
    bitField0_ |= 0x00000040;
    second_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint32 second = 8;</code>
   */
  public Builder clearSecond() {
    bitField0_ = (bitField0_ & ~0x00000040);
    second_ = 0;
    onChanged();
    return this;
  }

  // @@protoc_insertion_point(builder_scope:io.netty.example.worldclock.LocalTime)
}

static {
  defaultInstance = new LocalTime(true);
  defaultInstance.initFields();
}

// @@protoc_insertion_point(class_scope:io.netty.example.worldclock.LocalTime)
}

// Read-only view of a LocalTimes message (a repeated list of LocalTime).
public interface LocalTimesOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // repeated .io.netty.example.worldclock.LocalTime localTime = 1;
  /**
   * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code>
   */
  java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime> 
      getLocalTimeList();
  /**
   * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code>
   */
  io.netty.example.worldclock.WorldClockProtocol.LocalTime getLocalTime(int index);
  /**
   * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code>
   */
  int getLocalTimeCount();
  /**
   * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code>
   */
  java.util.List<? extends io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder> 
      getLocalTimeOrBuilderList();
  /**
   * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code>
   */
  io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder getLocalTimeOrBuilder(
      int index);
}
/**
 * Protobuf type {@code io.netty.example.worldclock.LocalTimes}
 */
public static final class LocalTimes extends
    com.google.protobuf.GeneratedMessage
    implements LocalTimesOrBuilder {
  // Use LocalTimes.newBuilder() to construct.
private LocalTimes(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private LocalTimes(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final LocalTimes defaultInstance; public static LocalTimes getDefaultInstance() { return defaultInstance; } public LocalTimes getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private LocalTimes( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { localTime_ = new java.util.ArrayList<io.netty.example.worldclock.WorldClockProtocol.LocalTime>(); mutable_bitField0_ |= 0x00000001; } localTime_.add(input.readMessage(io.netty.example.worldclock.WorldClockProtocol.LocalTime.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { localTime_ = java.util.Collections.unmodifiableList(localTime_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public 
static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTimes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTimes_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.LocalTimes.class, io.netty.example.worldclock.WorldClockProtocol.LocalTimes.Builder.class); } public static com.google.protobuf.Parser<LocalTimes> PARSER = new com.google.protobuf.AbstractParser<LocalTimes>() { public LocalTimes parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new LocalTimes(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<LocalTimes> getParserForType() { return PARSER; } // repeated .io.netty.example.worldclock.LocalTime localTime = 1; public static final int LOCALTIME_FIELD_NUMBER = 1; private java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime> localTime_; /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime> getLocalTimeList() { return localTime_; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public java.util.List<? 
extends io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder> getLocalTimeOrBuilderList() { return localTime_; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public int getLocalTimeCount() { return localTime_.size(); } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTime getLocalTime(int index) { return localTime_.get(index); } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder getLocalTimeOrBuilder( int index) { return localTime_.get(index); } private void initFields() { localTime_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getLocalTimeCount(); i++) { if (!getLocalTime(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < localTime_.size(); i++) { output.writeMessage(1, localTime_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < localTime_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, localTime_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } public static 
io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return 
PARSER.parseFrom(input); } public static io.netty.example.worldclock.WorldClockProtocol.LocalTimes parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(io.netty.example.worldclock.WorldClockProtocol.LocalTimes prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code io.netty.example.worldclock.LocalTimes} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements io.netty.example.worldclock.WorldClockProtocol.LocalTimesOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTimes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTimes_fieldAccessorTable .ensureFieldAccessorsInitialized( io.netty.example.worldclock.WorldClockProtocol.LocalTimes.class, io.netty.example.worldclock.WorldClockProtocol.LocalTimes.Builder.class); } // Construct using io.netty.example.worldclock.WorldClockProtocol.LocalTimes.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getLocalTimeFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (localTimeBuilder_ == null) { localTime_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { localTimeBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_LocalTimes_descriptor; } public io.netty.example.worldclock.WorldClockProtocol.LocalTimes getDefaultInstanceForType() { return io.netty.example.worldclock.WorldClockProtocol.LocalTimes.getDefaultInstance(); } public io.netty.example.worldclock.WorldClockProtocol.LocalTimes build() { io.netty.example.worldclock.WorldClockProtocol.LocalTimes result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public io.netty.example.worldclock.WorldClockProtocol.LocalTimes buildPartial() { io.netty.example.worldclock.WorldClockProtocol.LocalTimes result = new io.netty.example.worldclock.WorldClockProtocol.LocalTimes(this); int from_bitField0_ = bitField0_; if (localTimeBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { localTime_ = java.util.Collections.unmodifiableList(localTime_); bitField0_ = (bitField0_ & ~0x00000001); } result.localTime_ = localTime_; } else { result.localTime_ = localTimeBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.netty.example.worldclock.WorldClockProtocol.LocalTimes) { return mergeFrom((io.netty.example.worldclock.WorldClockProtocol.LocalTimes)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.netty.example.worldclock.WorldClockProtocol.LocalTimes 
other) { if (other == io.netty.example.worldclock.WorldClockProtocol.LocalTimes.getDefaultInstance()) return this; if (localTimeBuilder_ == null) { if (!other.localTime_.isEmpty()) { if (localTime_.isEmpty()) { localTime_ = other.localTime_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureLocalTimeIsMutable(); localTime_.addAll(other.localTime_); } onChanged(); } } else { if (!other.localTime_.isEmpty()) { if (localTimeBuilder_.isEmpty()) { localTimeBuilder_.dispose(); localTimeBuilder_ = null; localTime_ = other.localTime_; bitField0_ = (bitField0_ & ~0x00000001); localTimeBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getLocalTimeFieldBuilder() : null; } else { localTimeBuilder_.addAllMessages(other.localTime_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getLocalTimeCount(); i++) { if (!getLocalTime(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.netty.example.worldclock.WorldClockProtocol.LocalTimes parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.netty.example.worldclock.WorldClockProtocol.LocalTimes) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .io.netty.example.worldclock.LocalTime localTime = 1; private java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime> localTime_ = java.util.Collections.emptyList(); private void ensureLocalTimeIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { localTime_ = new java.util.ArrayList<io.netty.example.worldclock.WorldClockProtocol.LocalTime>(localTime_); bitField0_ |= 
0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.LocalTime, io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder, io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder> localTimeBuilder_; /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime> getLocalTimeList() { if (localTimeBuilder_ == null) { return java.util.Collections.unmodifiableList(localTime_); } else { return localTimeBuilder_.getMessageList(); } } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public int getLocalTimeCount() { if (localTimeBuilder_ == null) { return localTime_.size(); } else { return localTimeBuilder_.getCount(); } } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTime getLocalTime(int index) { if (localTimeBuilder_ == null) { return localTime_.get(index); } else { return localTimeBuilder_.getMessage(index); } } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder setLocalTime( int index, io.netty.example.worldclock.WorldClockProtocol.LocalTime value) { if (localTimeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocalTimeIsMutable(); localTime_.set(index, value); onChanged(); } else { localTimeBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder setLocalTime( int index, io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder builderForValue) { if (localTimeBuilder_ == null) { ensureLocalTimeIsMutable(); localTime_.set(index, builderForValue.build()); onChanged(); } else { localTimeBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated 
.io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder addLocalTime(io.netty.example.worldclock.WorldClockProtocol.LocalTime value) { if (localTimeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocalTimeIsMutable(); localTime_.add(value); onChanged(); } else { localTimeBuilder_.addMessage(value); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder addLocalTime( int index, io.netty.example.worldclock.WorldClockProtocol.LocalTime value) { if (localTimeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureLocalTimeIsMutable(); localTime_.add(index, value); onChanged(); } else { localTimeBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder addLocalTime( io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder builderForValue) { if (localTimeBuilder_ == null) { ensureLocalTimeIsMutable(); localTime_.add(builderForValue.build()); onChanged(); } else { localTimeBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder addLocalTime( int index, io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder builderForValue) { if (localTimeBuilder_ == null) { ensureLocalTimeIsMutable(); localTime_.add(index, builderForValue.build()); onChanged(); } else { localTimeBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder addAllLocalTime( java.lang.Iterable<? 
extends io.netty.example.worldclock.WorldClockProtocol.LocalTime> values) { if (localTimeBuilder_ == null) { ensureLocalTimeIsMutable(); super.addAll(values, localTime_); onChanged(); } else { localTimeBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder clearLocalTime() { if (localTimeBuilder_ == null) { localTime_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { localTimeBuilder_.clear(); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public Builder removeLocalTime(int index) { if (localTimeBuilder_ == null) { ensureLocalTimeIsMutable(); localTime_.remove(index); onChanged(); } else { localTimeBuilder_.remove(index); } return this; } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder getLocalTimeBuilder( int index) { return getLocalTimeFieldBuilder().getBuilder(index); } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder getLocalTimeOrBuilder( int index) { if (localTimeBuilder_ == null) { return localTime_.get(index); } else { return localTimeBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public java.util.List<? 
extends io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder> getLocalTimeOrBuilderList() { if (localTimeBuilder_ != null) { return localTimeBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(localTime_); } } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder addLocalTimeBuilder() { return getLocalTimeFieldBuilder().addBuilder( io.netty.example.worldclock.WorldClockProtocol.LocalTime.getDefaultInstance()); } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder addLocalTimeBuilder( int index) { return getLocalTimeFieldBuilder().addBuilder( index, io.netty.example.worldclock.WorldClockProtocol.LocalTime.getDefaultInstance()); } /** * <code>repeated .io.netty.example.worldclock.LocalTime localTime = 1;</code> */ public java.util.List<io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder> getLocalTimeBuilderList() { return getLocalTimeFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.LocalTime, io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder, io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder> getLocalTimeFieldBuilder() { if (localTimeBuilder_ == null) { localTimeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< io.netty.example.worldclock.WorldClockProtocol.LocalTime, io.netty.example.worldclock.WorldClockProtocol.LocalTime.Builder, io.netty.example.worldclock.WorldClockProtocol.LocalTimeOrBuilder>( localTime_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); localTime_ = null; } return localTimeBuilder_; } // @@protoc_insertion_point(builder_scope:io.netty.example.worldclock.LocalTimes) } static { defaultInstance = new LocalTimes(true); 
defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:io.netty.example.worldclock.LocalTimes) } private static com.google.protobuf.Descriptors.Descriptor internal_static_io_netty_example_worldclock_Location_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_io_netty_example_worldclock_Location_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_io_netty_example_worldclock_Locations_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_io_netty_example_worldclock_Locations_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_io_netty_example_worldclock_LocalTime_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_io_netty_example_worldclock_LocalTime_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_io_netty_example_worldclock_LocalTimes_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_io_netty_example_worldclock_LocalTimes_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\nBsrc/main/java/io/netty/example/worldcl" + "ock/WorldClockProtocol.proto\022\033io.netty.e" + "xample.worldclock\"S\n\010Location\0229\n\tcontine" + "nt\030\001 \002(\0162&.io.netty.example.worldclock.C" + "ontinent\022\014\n\004city\030\002 \002(\t\"D\n\tLocations\0227\n\010l" + "ocation\030\001 \003(\0132%.io.netty.example.worldcl" + "ock.Location\"\245\001\n\tLocalTime\022\014\n\004year\030\001 \002(\r" + "\022\r\n\005month\030\002 \002(\r\022\022\n\ndayOfMonth\030\004 \002(\r\0229\n\td" + "ayOfWeek\030\005 \002(\0162&.io.netty.example.worldc" + 
"lock.DayOfWeek\022\014\n\004hour\030\006 \002(\r\022\016\n\006minute\030\007", " \002(\r\022\016\n\006second\030\010 \002(\r\"G\n\nLocalTimes\0229\n\tlo" + "calTime\030\001 \003(\0132&.io.netty.example.worldcl" + "ock.LocalTime*\231\001\n\tContinent\022\n\n\006AFRICA\020\000\022" + "\013\n\007AMERICA\020\001\022\016\n\nANTARCTICA\020\002\022\n\n\006ARCTIC\020\003" + "\022\010\n\004ASIA\020\004\022\014\n\010ATLANTIC\020\005\022\r\n\tAUSTRALIA\020\006\022" + "\n\n\006EUROPE\020\007\022\n\n\006INDIAN\020\010\022\013\n\007MIDEAST\020\t\022\013\n\007" + "PACIFIC\020\n*g\n\tDayOfWeek\022\n\n\006SUNDAY\020\001\022\n\n\006MO" + "NDAY\020\002\022\013\n\007TUESDAY\020\003\022\r\n\tWEDNESDAY\020\004\022\014\n\010TH" + "URSDAY\020\005\022\n\n\006FRIDAY\020\006\022\014\n\010SATURDAY\020\007B\002H\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; internal_static_io_netty_example_worldclock_Location_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_io_netty_example_worldclock_Location_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_io_netty_example_worldclock_Location_descriptor, new java.lang.String[] { "Continent", "City", }); internal_static_io_netty_example_worldclock_Locations_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_io_netty_example_worldclock_Locations_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_io_netty_example_worldclock_Locations_descriptor, new java.lang.String[] { "Location", }); internal_static_io_netty_example_worldclock_LocalTime_descriptor = getDescriptor().getMessageTypes().get(2); 
internal_static_io_netty_example_worldclock_LocalTime_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_io_netty_example_worldclock_LocalTime_descriptor, new java.lang.String[] { "Year", "Month", "DayOfMonth", "DayOfWeek", "Hour", "Minute", "Second", }); internal_static_io_netty_example_worldclock_LocalTimes_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_io_netty_example_worldclock_LocalTimes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_io_netty_example_worldclock_LocalTimes_descriptor, new java.lang.String[] { "LocalTime", }); return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); } // @@protoc_insertion_point(outer_class_scope) }
apache-2.0
Juriy/masterhack
masterpass/src/com/mastercard/mcwallet/sdk/xml/switchapiservices/PairingDataType.java
2955
// // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.7 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2014.08.21 at 12:22:14 PM CDT // package com.mastercard.mcwallet.sdk.xml.switchapiservices; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for PairingDataType complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="PairingDataType"> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="Type"> * &lt;simpleType> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}string"> * &lt;enumeration value="CARD"/> * &lt;enumeration value="ADDRESS"/> * &lt;enumeration value="REWARD_PROGRAM"/> * &lt;enumeration value="PROFILE"/> * &lt;/restriction> * &lt;/simpleType> * &lt;/element> * &lt;element name="ExtensionPoint" type="{}ExtensionPoint" minOccurs="0"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "PairingDataType", propOrder = { "type", "extensionPoint" }) public class PairingDataType { @XmlElement(name = "Type", required = true) protected String type; @XmlElement(name = "ExtensionPoint") protected ExtensionPoint extensionPoint; /** * Gets the value of the type property. * * @return * possible object is * {@link String } * */ public String getType() { return type; } /** * Sets the value of the type property. 
* * @param value * allowed object is * {@link String } * */ public void setType(String value) { this.type = value; } /** * Gets the value of the extensionPoint property. * * @return * possible object is * {@link ExtensionPoint } * */ public ExtensionPoint getExtensionPoint() { return extensionPoint; } /** * Sets the value of the extensionPoint property. * * @param value * allowed object is * {@link ExtensionPoint } * */ public void setExtensionPoint(ExtensionPoint value) { this.extensionPoint = value; } }
isc
TealCube/strife
src/main/java/land/face/strife/data/effects/PlaySound.java
961
package land.face.strife.data.effects; import land.face.strife.data.StrifeMob; import org.bukkit.Location; import org.bukkit.Sound; public class PlaySound extends LocationEffect { private Sound sound; private float volume; private float pitch; @Override public void apply(StrifeMob caster, StrifeMob target) { Location loc = target.getEntity().getLocation().clone(); loc.getWorld().playSound(loc, sound, volume, pitch); } @Override public void applyAtLocation(StrifeMob caster, Location location) { location.getWorld().playSound(location, sound, volume, pitch); } public Sound getSound() { return sound; } public void setSound(Sound sound) { this.sound = sound; } public float getVolume() { return volume; } public void setVolume(float volume) { this.volume = volume; } public float getPitch() { return pitch; } public void setPitch(float pitch) { this.pitch = pitch; } }
isc
dankito/ormlite-jpa-core
src/test/java/com/j256/ormlite/stmt/StatementExecutorTest.java
8101
package com.j256.ormlite.stmt;

import com.j256.ormlite.dao.Dao;
import com.j256.ormlite.field.DatabaseField;
import com.j256.ormlite.jpa.EntityConfig;
import com.j256.ormlite.stmt.StatementBuilder.StatementType;
import com.j256.ormlite.support.CompiledStatement;
import com.j256.ormlite.support.DatabaseConnection;

import org.junit.Test;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Tests for {@link StatementExecutor}: SQLException propagation from
 * update/delete, auto-commit handling in callBatchTasks, and id-requiring DAO
 * operations against an entity with no id field.
 *
 * NOTE: the EasyMock tests depend on the exact record/replay/verify call
 * order — keep the expectation statements in sequence.
 */
public class StatementExecutorTest extends BaseCoreStmtTest {

	@Test
	public void testUpdateThrow() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		@SuppressWarnings("unchecked")
		PreparedUpdate<Foo> update = createMock(PreparedUpdate.class);
		CompiledStatement compiledStmt = createMock(CompiledStatement.class);
		// the compiled statement throws on execution...
		expect(update.compile(connection, StatementType.UPDATE)).andReturn(compiledStmt);
		expect(compiledStmt.runUpdate()).andThrow(new SQLException("expected"));
		// ...and must still be closed afterwards
		compiledStmt.close();
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection, compiledStmt, update);
		try {
			statementExec.update(connection, update);
			fail("should have thrown");
		} catch (SQLException e) {
			// expected
		}
		verify(connection, compiledStmt, update);
	}

	@Test
	public void testDeleteThrow() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		@SuppressWarnings("unchecked")
		PreparedDelete<Foo> delete = createMock(PreparedDelete.class);
		CompiledStatement compiledStmt = createMock(CompiledStatement.class);
		// same shape as testUpdateThrow, but for the DELETE path
		expect(delete.compile(connection, StatementType.DELETE)).andReturn(compiledStmt);
		expect(compiledStmt.runUpdate()).andThrow(new SQLException("expected"));
		compiledStmt.close();
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection, compiledStmt, delete);
		try {
			statementExec.delete(connection, delete);
			fail("should have thrown");
		} catch (SQLException e) {
			// expected
		}
		verify(connection, compiledStmt, delete);
	}

	@Test
	public void testCallBatchTasksNoAutoCommit() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		// auto-commit unsupported: the callable must still run, with no
		// setAutoCommit calls expected on the connection
		expect(connection.isAutoCommitSupported()).andReturn(false);
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection);
		final AtomicBoolean called = new AtomicBoolean(false);
		statementExec.callBatchTasks(connection, false, new Callable<Void>() {
			public Void call() {
				called.set(true);
				return null;
			}
		});
		assertTrue(called.get());
		verify(connection);
	}

	@Test
	public void testCallBatchTasksAutoCommitFalse() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		// auto-commit supported but already off: nothing to toggle
		expect(connection.isAutoCommitSupported()).andReturn(true);
		expect(connection.isAutoCommit()).andReturn(false);
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection);
		final AtomicBoolean called = new AtomicBoolean(false);
		statementExec.callBatchTasks(connection, false, new Callable<Void>() {
			public Void call() {
				called.set(true);
				return null;
			}
		});
		assertTrue(called.get());
		verify(connection);
	}

	@Test
	public void testCallBatchTasksAutoCommitTrue() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		// auto-commit on: executor must turn it off for the batch and back on after
		expect(connection.isAutoCommitSupported()).andReturn(true);
		expect(connection.isAutoCommit()).andReturn(true);
		connection.setAutoCommit(false);
		connection.setAutoCommit(true);
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection);
		final AtomicBoolean called = new AtomicBoolean(false);
		statementExec.callBatchTasks(connection, false, new Callable<Void>() {
			public Void call() {
				called.set(true);
				return null;
			}
		});
		assertTrue(called.get());
		verify(connection);
	}

	@Test
	public void testCallBatchTasksAutoCommitTrueThrow() throws Exception {
		EntityConfig<Foo, String> entityConfig = new EntityConfig<Foo, String>(connectionSource, null, Foo.class);
		DatabaseConnection connection = createMock(DatabaseConnection.class);
		expect(connection.isAutoCommitSupported()).andReturn(true);
		expect(connection.isAutoCommit()).andReturn(true);
		connection.setAutoCommit(false);
		// auto-commit must be restored even when the callable throws
		connection.setAutoCommit(true);
		StatementExecutor<Foo, String> statementExec =
				new StatementExecutor<Foo, String>(databaseType, entityConfig, null);
		replay(connection);
		try {
			statementExec.callBatchTasks(connection, false, new Callable<Void>() {
				public Void call() throws Exception {
					throw new Exception("expected");
				}
			});
			fail("Should have thrown");
		} catch (Exception e) {
			// expected
		}
		verify(connection);
	}

	// updateId requires an id column; NoId has none
	@Test(expected = SQLException.class)
	public void testUpdateIdNoId() throws Exception {
		Dao<NoId, Object> noIdDao = createDao(NoId.class, true);
		NoId noId = new NoId();
		noId.stuff = "1";
		assertEquals(1, noIdDao.create(noId));
		noIdDao.updateId(noId, "something else");
	}

	// refresh requires an id column; NoId has none
	@Test(expected = SQLException.class)
	public void testRefreshNoId() throws Exception {
		Dao<NoId, Object> noIdDao = createDao(NoId.class, true);
		NoId noId = new NoId();
		noId.stuff = "1";
		assertEquals(1, noIdDao.create(noId));
		noIdDao.refresh(noId);
	}

	// delete requires an id column; NoId has none
	@Test(expected = SQLException.class)
	public void testDeleteNoId() throws Exception {
		Dao<NoId, Object> noIdDao = createDao(NoId.class, true);
		NoId noId = new NoId();
		noId.stuff = "1";
		assertEquals(1, noIdDao.create(noId));
		noIdDao.delete(noId);
	}

	// collection delete requires an id column; NoId has none
	@Test(expected = SQLException.class)
	public void testDeleteObjectsNoId() throws Exception {
		Dao<NoId, Object> noIdDao = createDao(NoId.class, true);
		NoId noId = new NoId();
		noId.stuff = "1";
		assertEquals(1, noIdDao.create(noId));
		ArrayList<NoId> noIdList = new ArrayList<NoId>();
		noIdList.add(noId);
		noIdDao.delete(noIdList);
	}

	// deleteIds requires an id column; NoId has none
	@Test(expected = SQLException.class)
	public void testDeleteIdsNoId() throws Exception {
		Dao<NoId, Object> noIdDao = createDao(NoId.class, true);
		NoId noId = new NoId();
		noId.stuff = "1";
		assertEquals(1, noIdDao.create(noId));
		ArrayList<Object> noIdList = new ArrayList<Object>();
		noIdList.add(noId);
		noIdDao.deleteIds(noIdList);
	}

	@Test
	public void testCallBatchTasksCommitted() throws Exception {
		final Dao<Foo, Integer> dao = createDao(Foo.class, true);
		final Foo foo1 = new Foo();
		DatabaseConnection conn = dao.startThreadConnection();
		try {
			dao.callBatchTasks(new Callable<Void>() {
				public Void call() throws Exception {
					assertEquals(1, dao.create(foo1));
					assertNotNull(dao.queryForId(foo1.id));
					return null;
				}
			});
			// the batch committed on completion, so a later rollback must not
			// undo the create
			dao.rollBack(conn);
			assertNotNull(dao.queryForId(foo1.id));
		} finally {
			dao.endThreadConnection(conn);
		}
	}

	// entity with a plain column and no id field
	protected static class NoId {
		@DatabaseField
		String stuff;
	}
}
isc
tanguydevos/RSSFeed
app/src/main/java/com/tanguy/rssfeed/service/RetrofitService.java
1039
package com.tanguy.rssfeed.service;

import com.tanguy.rssfeed.model.Channel;
import com.tanguy.rssfeed.model.Feed;

import java.util.List;

import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.http.Field;
import retrofit2.http.FormUrlEncoded;
import retrofit2.http.GET;
import retrofit2.http.POST;
import retrofit2.http.Query;
import retrofit2.http.Url;

/**
 * Retrofit endpoint declarations for the RSSFeed backend
 * (package-private; obtain an implementation via Retrofit's create()).
 */
interface RetrofitService {

    // Users routes and endpoints

    /**
     * Authenticates a user. Sends {@code login} and {@code password} as
     * form-encoded fields to {@code POST /login}; the raw body is returned
     * for the caller to parse.
     */
    @FormUrlEncoded
    @POST("login")
    Call<ResponseBody> loginUser(@Field("login") String login, @Field("password") String password);

    /**
     * Registers a new user via form-encoded {@code POST /signup}.
     */
    @FormUrlEncoded
    @POST("signup")
    Call<ResponseBody> signupUser(@Field("login") String login, @Field("password") String password);

    // Channels routes and endpoints

    /**
     * Fetches the user's channels from {@code GET /channels?token=...}.
     *
     * @param token session token appended as a query parameter
     */
    @GET("channels")
    Call<List<Channel>> getChannels(@Query("token") String token);

    /**
     * Fetches the feed entries of a single channel from a fully-qualified URL
     * (the URL is supplied at call time, not baked into the interface).
     */
    @GET
    Call<List<Feed>> getChannel(@Url String url);

    /**
     * Subscribes the user to a new RSS channel via form-encoded
     * {@code POST /rss/new}.
     *
     * @param token session token
     * @param url   RSS feed URL to add
     */
    @FormUrlEncoded
    @POST("rss/new")
    Call<ResponseBody> addChannel(@Field("token") String token, @Field("rssUrl") String url);
}
mit
hprose/hprose-java
src/main/java/hprose/io/unserialize/DateArrayUnserializer.java
1851
/**********************************************************\
|                                                          |
|                          hprose                          |
|                                                          |
| Official WebSite: http://www.hprose.com/                 |
|                   http://www.hprose.org/                 |
|                                                          |
\**********************************************************/
/**********************************************************\
 *                                                        *
 * DateArrayUnserializer.java                             *
 *                                                        *
 * Date array unserializer class for Java.                *
 *                                                        *
 * LastModified: Aug 3, 2016                              *
 * Author: Ma Bingyao <andot@hprose.com>                  *
 *                                                        *
\**********************************************************/
package hprose.io.unserialize;

import static hprose.io.HproseTags.TagEmpty;
import static hprose.io.HproseTags.TagList;
import java.io.IOException;
import java.lang.reflect.Type;
import java.sql.Date;

/**
 * Unserializes hprose data into a {@code java.sql.Date[]}.
 *
 * A TagEmpty token yields an empty array, a TagList token is read as a
 * reference-tracked date array, and any other tag falls back to the base
 * unserializer's handling.
 */
public final class DateArrayUnserializer extends BaseUnserializer<Date[]> {

    /** Shared stateless singleton. */
    public static final DateArrayUnserializer instance = new DateArrayUnserializer();

    @Override
    public Date[] unserialize(Reader reader, int tag, Type type) throws IOException {
        // Empty list: nothing further to read from the stream.
        if (tag == TagEmpty) {
            return new Date[0];
        }
        // Regular list: delegate to the reference-aware array reader.
        if (tag == TagList) {
            return ReferenceReader.readDateArray(reader);
        }
        // Anything else (null, refs, errors) is handled by the base class.
        return super.unserialize(reader, tag, type);
    }

    /** Convenience overload that reads the next tag itself. */
    public Date[] read(Reader reader) throws IOException {
        return read(reader, Date[].class);
    }
}
mit
BlueGoliath/GoliathOCBackend
src/goliath/ou/interfaces/CsvData.java
1269
/* * The MIT License * * Copyright 2017 Ty Young. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package goliath.ou.interfaces; import java.util.HashMap; public interface CsvData { public HashMap<String, String> getCsvData(); }
mit
aJanuary/argparse4j
src/main/java/net/sourceforge/argparse4j/internal/TerminalWidth.java
4611
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.sourceforge.argparse4j.internal;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Returns the column width of the command line terminal from which this program
 * was started. Typically the column width is around 80 characters or so.
 *
 * Currently works on Linux and OSX.
 *
 * Returns -1 if the column width cannot be determined for some reason.
 */
public class TerminalWidth {

    private static final int UNKNOWN_WIDTH = -1;

    // stty output patterns, compiled once instead of on every call.
    // OSX prints e.g.:   "speed 9600 baud; 39 rows; 80 columns;"
    private static final Pattern OSX_COLUMNS = Pattern.compile("(\\d+) columns");
    // Linux prints e.g.: "speed 9600 baud; rows 50; columns 83; line = 0;"
    private static final Pattern LINUX_COLUMNS = Pattern.compile("columns (\\d+)");

    public static void main(String[] args) {
        System.out.println("terminalWidth: "
                + new TerminalWidth().getTerminalWidth());
    }

    /**
     * Returns the terminal width in columns, or -1 if it cannot be determined.
     *
     * The COLUMNS environment variable, if set, takes precedence over asking
     * stty. A malformed COLUMNS value yields -1 rather than falling through
     * to stty (preserving historical behavior).
     */
    public int getTerminalWidth() {
        String width = System.getenv("COLUMNS");
        if (width != null) {
            try {
                return Integer.parseInt(width);
            } catch (NumberFormatException e) {
                return UNKNOWN_WIDTH;
            }
        }
        try {
            return getTerminalWidth2();
        } catch (IOException e) {
            return UNKNOWN_WIDTH;
        }
    }

    // see
    // http://grokbase.com/t/gg/clojure/127qwgscvc/how-do-you-determine-terminal-console-width-in-%60lein-repl%60
    /**
     * Asks stty (run through sh so /dev/tty can be redirected to its stdin)
     * for the terminal settings and extracts the column count.
     *
     * @throws IOException if sh cannot be located or the process fails to start
     */
    private int getTerminalWidth2() throws IOException {
        String osName = System.getProperty("os.name");
        boolean isOSX = osName.startsWith("Mac OS X");
        boolean isLinux = osName.startsWith("Linux")
                || osName.startsWith("LINUX");
        if (!isLinux && !isOSX) {
            return UNKNOWN_WIDTH; // actually, this might also work on Solaris
                                  // but this hasn't been tested
        }
        ProcessBuilder builder = new ProcessBuilder(which("sh").toString(),
                "-c", "stty -a < /dev/tty");
        builder.redirectErrorStream(true);
        Process process = builder.start();
        InputStream in = process.getInputStream();
        ByteArrayOutputStream resultBytes = new ByteArrayOutputStream();
        try {
            byte[] buf = new byte[1024];
            int len;
            while ((len = in.read(buf)) >= 0) {
                resultBytes.write(buf, 0, len);
            }
        } finally {
            in.close();
        }
        // Decode with an explicit charset; the platform default could
        // mis-decode on exotic locales. stty output is effectively ASCII,
        // for which UTF-8 is a safe superset. (toString(String) throws
        // UnsupportedEncodingException, an IOException, already declared.)
        String result = resultBytes.toString("UTF-8");
        try {
            // Drain output BEFORE waiting so the child cannot block on a
            // full pipe; a non-zero exit means stty failed (e.g. no tty).
            if (process.waitFor() != 0) {
                return UNKNOWN_WIDTH;
            }
        } catch (InterruptedException e) {
            return UNKNOWN_WIDTH;
        }
        Matcher m = (isOSX ? OSX_COLUMNS : LINUX_COLUMNS).matcher(result);
        if (!m.find()) {
            return UNKNOWN_WIDTH;
        }
        result = m.group(1);
        try {
            return Integer.parseInt(result);
        } catch (NumberFormatException e) {
            return UNKNOWN_WIDTH;
        }
    }

    /**
     * Locates an executable on PATH.
     *
     * @return the absolute file of the first matching executable
     * @throws IOException if the command is not found on PATH
     */
    private File which(String cmd) throws IOException {
        String path = System.getenv("PATH");
        if (path != null) {
            for (String dir : path.split(Pattern.quote(File.pathSeparator))) {
                File command = new File(dir.trim(), cmd);
                if (command.canExecute()) {
                    return command.getAbsoluteFile();
                }
            }
        }
        throw new IOException("No command '" + cmd + "' on path " + path);
    }
}
mit