text
stringlengths
7
1.01M
package com.mongodb.pipeline.transfer.test.util; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.HashMap; import java.util.Map; /** * <pre> * Modify Information: * Author Date Description * ============ ============= ============================ * lilei 2019/9/16 Create this file * </pre> */ public final class FileUtils { private FileUtils() { } /** * 读取目录内所有文件内容 * @param path * @return */ public static Map<String, String> readDirToString(String path) { Map<String, String> result = null; File dir = new File(path); if (dir.isDirectory()) { String[] files = dir.list(); result = new HashMap<String, String>((int) (files.length / 0.75)); if (!path.endsWith(File.separator)) { path += File.separator; } for (int i = 0; i < files.length; i++) { String filename = path + files[i]; result.put(filename, readFileToString(filename)); } } return result; } /** * 读取文件内容 * @param fileName * @return */ public static String readFileToString(String fileName) { FileInputStream fis = null; try { File file = new File(fileName); byte[] filecontent = new byte[(int) file.length()]; fis = new FileInputStream(file); fis.read(filecontent); return new String(filecontent); } catch (FileNotFoundException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } finally { if (null != fis) { try { fis.close(); } catch (IOException e) { e.printStackTrace(); } } } return null; } }
package com.mx.mylibrary; public class CustomException extends RuntimeException { public CustomException() { super(); } public CustomException(String message) { super(message); } }
/* * Copyright 2014 by the Metanome project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.metanome.algorithm_integration; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonTypeName; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.TreeSet; /** * This is the leaf node class for the {@link ColumnCondition} using the composite pattern. * * @author Jens Ehrlich */ @JsonTypeName("ColumnConditionValue") public class ColumnConditionValue implements ColumnCondition { protected ColumnIdentifier columnIdentifier; protected String columnValue; protected boolean isNegated; protected float coverage = 0; /** * Exists for Gwt serialization */ protected ColumnConditionValue() { this.columnIdentifier = new ColumnIdentifier(); this.columnValue = new String(); this.isNegated = false; } /** * Constructs a {@link ColumnConditionValue} using a {@link ColumnIdentifier} and a {@link * java.lang.String }. By default the condition is not negated. */ public ColumnConditionValue(ColumnIdentifier columnIdentifier, String columnValue) { this.columnIdentifier = columnIdentifier; this.columnValue = columnValue; this.isNegated = false; } /** * Constructs a {@link ColumnConditionValue} using a {@link ColumnIdentifier}, a {@link * java.lang.String }, and a boolean which indicates the negation of the condition. 
*/ public ColumnConditionValue(ColumnIdentifier columnIdentifier, String columnValue, boolean isNegated) { this(columnIdentifier, columnValue); this.isNegated = isNegated; } @Override public float getCoverage() { return coverage; } @Override public void setCoverage(float coverage) { this.coverage = coverage; } public ColumnIdentifier getColumnIdentifier() { return columnIdentifier; } public void setColumnIdentifier(ColumnIdentifier columnIdentifier) { this.columnIdentifier = columnIdentifier; } public String getColumnValue() { return columnValue; } public void setColumnValue(String columnValue) { this.columnValue = columnValue; } public boolean isNegated() { return isNegated; } public void setNegated(boolean isNegated) { this.isNegated = isNegated; } @Override @JsonIgnore public TreeSet<ColumnIdentifier> getContainedColumns() { TreeSet<ColumnIdentifier> result = new TreeSet<>(); result.add(this.columnIdentifier); return result; } @Override @JsonIgnore public List<Map<ColumnIdentifier, String>> getPatternConditions() { List<Map<ColumnIdentifier, String>> result = new LinkedList<>(); Map<ColumnIdentifier, String> condition = new TreeMap<>(); condition.put(this.columnIdentifier, this.columnValue); result.add(condition); return result; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append(this.columnIdentifier.toString()); builder.append("= "); if (this.isNegated) { builder.append(NOT); } builder.append(this.columnValue); return builder.toString(); } @Override public ColumnCondition add(ColumnCondition value) { return this; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ColumnConditionValue that = (ColumnConditionValue) o; if (isNegated != that.isNegated) { return false; } if (!columnIdentifier.equals(that.columnIdentifier)) { return false; } if (!columnValue.equals(that.columnValue)) { return false; } return true; } @Override 
public int hashCode() { int result = columnIdentifier.hashCode(); result = 31 * result + columnValue.hashCode(); result = 31 * result + (isNegated ? 1 : 0); return result; } @Override public int compareTo(ColumnCondition o) { if (o instanceof ColumnConditionValue) { ColumnConditionValue other = (ColumnConditionValue) o; if (other.isNegated == this.isNegated) { int columnComparison = this.columnIdentifier.compareTo(other.columnIdentifier); if (columnComparison != 0) { return columnComparison; } else { return this.columnValue.compareTo(other.columnValue); } } else { if (this.isNegated) { return 1; } else { return -1; } } } return -1; } }
/*
 * Copyright 2011-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not
 * use this file except in compliance with the License. A copy of the License is
 * located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.cloudfront.model;

import java.io.Serializable;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * The request to list distributions that are associated with a specified AWS
 * WAF web ACL.
 */
public class ListDistributionsByWebACLIdRequest extends AmazonWebServiceRequest
        implements Serializable, Cloneable {

    /**
     * Pagination marker. If more than MaxItems distributions satisfy the
     * request, the response includes a NextMarker element; pass that value
     * here on the next request. Omit on the first request.
     */
    private String marker;

    /**
     * Maximum number of distributions to return in the response body.
     * The maximum and default values are both 100.
     */
    private String maxItems;

    /**
     * The Id of the AWS WAF web ACL for which the associated distributions
     * are listed. Specify "null" to list distributions that aren't
     * associated with any web ACL.
     */
    private String webACLId;

    /**
     * Sets the pagination marker (the NextMarker value from the previous
     * response; omit for the first request).
     *
     * @param marker pagination marker
     */
    public void setMarker(String marker) {
        this.marker = marker;
    }

    /**
     * Returns the pagination marker.
     *
     * @return the pagination marker, or null if unset
     */
    public String getMarker() {
        return this.marker;
    }

    /**
     * Fluent variant of {@link #setMarker(String)}.
     *
     * @param marker pagination marker
     * @return this request, for call chaining
     */
    public ListDistributionsByWebACLIdRequest withMarker(String marker) {
        setMarker(marker);
        return this;
    }

    /**
     * Sets the maximum number of distributions to return (maximum and
     * default are both 100).
     *
     * @param maxItems maximum number of distributions
     */
    public void setMaxItems(String maxItems) {
        this.maxItems = maxItems;
    }

    /**
     * Returns the maximum number of distributions to return.
     *
     * @return the maximum, or null if unset
     */
    public String getMaxItems() {
        return this.maxItems;
    }

    /**
     * Fluent variant of {@link #setMaxItems(String)}.
     *
     * @param maxItems maximum number of distributions
     * @return this request, for call chaining
     */
    public ListDistributionsByWebACLIdRequest withMaxItems(String maxItems) {
        setMaxItems(maxItems);
        return this;
    }

    /**
     * Sets the AWS WAF web ACL Id ("null" lists distributions that aren't
     * associated with a web ACL).
     *
     * @param webACLId web ACL Id
     */
    public void setWebACLId(String webACLId) {
        this.webACLId = webACLId;
    }

    /**
     * Returns the AWS WAF web ACL Id.
     *
     * @return the web ACL Id, or null if unset
     */
    public String getWebACLId() {
        return this.webACLId;
    }

    /**
     * Fluent variant of {@link #setWebACLId(String)}.
     *
     * @param webACLId web ACL Id
     * @return this request, for call chaining
     */
    public ListDistributionsByWebACLIdRequest withWebACLId(String webACLId) {
        setWebACLId(webACLId);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging.
     *
     * @return A string representation of this object.
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        if (getMarker() != null) {
            sb.append("Marker: ").append(getMarker()).append(",");
        }
        if (getMaxItems() != null) {
            sb.append("MaxItems: ").append(getMaxItems()).append(",");
        }
        if (getWebACLId() != null) {
            sb.append("WebACLId: ").append(getWebACLId());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof ListDistributionsByWebACLIdRequest)) {
            return false;
        }
        ListDistributionsByWebACLIdRequest other = (ListDistributionsByWebACLIdRequest) obj;
        return eq(getMarker(), other.getMarker())
                && eq(getMaxItems(), other.getMaxItems())
                && eq(getWebACLId(), other.getWebACLId());
    }

    /** Null-safe equality, matching the null-xor checks of the generated code. */
    private static boolean eq(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + (getMarker() == null ? 0 : getMarker().hashCode());
        hashCode = prime * hashCode + (getMaxItems() == null ? 0 : getMaxItems().hashCode());
        hashCode = prime * hashCode + (getWebACLId() == null ? 0 : getWebACLId().hashCode());
        return hashCode;
    }

    @Override
    public ListDistributionsByWebACLIdRequest clone() {
        return (ListDistributionsByWebACLIdRequest) super.clone();
    }
}
package org.esbench.generator.field.utils;

import static org.testng.Assert.assertEquals;

import org.apache.commons.lang3.RandomUtils;
import org.esbench.testng.UtilityClassValidator;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

/**
 * TestNG tests for AddressUtils: IPv4 text &lt;-&gt; long conversion, CIDR
 * derivation for an address pair, and address counting for a CIDR block.
 * Inputs are supplied via TestNG data providers.
 */
public class AddressUtilsTest {
	// 255.255.255.255 as an unsigned 32-bit value (2^32 - 1).
	private static final long HIGHEST_IP = 4294967295L;
	// 0.0.0.0
	private static final int LOWEST_IP = 0;

	// AddressUtils must satisfy the project's utility-class conventions.
	@Test
	public void utilityTest() {
		UtilityClassValidator.validate(AddressUtils.class);
	}

	// Malformed dotted-quad strings: empty, truncated, out-of-range octet, non-numeric.
	@DataProvider
	public Object[][] invalidTextInputsDataProvider() {
		Object[][] values = { { "" }, { "127." }, { "256.0.0.0" }, { "A.B.C.D" } };
		return values;
	}

	@Test(dataProvider = "invalidTextInputsDataProvider", expectedExceptions = IllegalArgumentException.class)
	public void invalidTextInputs(String ipAsText) {
		AddressUtils.ipv4ToLong(ipAsText);
	}

	// Long values just outside the valid unsigned 32-bit range.
	@DataProvider
	public Object[][] invalidLongInputsDataProvider() {
		Object[][] values = { { -1L }, { HIGHEST_IP + 1 } };
		return values;
	}

	@Test(dataProvider = "invalidLongInputsDataProvider", expectedExceptions = IllegalArgumentException.class)
	public void invalidLongInputs(long ipAsLong) {
		AddressUtils.longToIpv4(ipAsLong);
	}

	// Pairs of (numeric IP, dotted-quad text); reused by both conversion tests.
	@DataProvider
	public Object[][] longToIpv4DataProvider() {
		Object[][] values = { { LOWEST_IP, "0.0.0.0" }, { HIGHEST_IP, "255.255.255.255" }, { 255, "0.0.0.255" },
				{ 2130706433, "127.0.0.1" }, { 2130706434, "127.0.0.2" }, };
		return values;
	}

	@Test(dataProvider = "longToIpv4DataProvider")
	public void longToIpv4(long ipAsLong, String expected) {
		assertEquals(AddressUtils.longToIpv4(ipAsLong), expected);
	}

	// Same fixture applied in the opposite direction.
	@Test(dataProvider = "longToIpv4DataProvider")
	public void ipv4ToLong(long expected, String ipAsText) {
		assertEquals(AddressUtils.ipv4ToLong(ipAsText), expected);
	}

	// Random round-trip: long -> text -> long must be the identity.
	@Test(invocationCount = 10)
	public void backAndForth() {
		long expected = RandomUtils.nextLong(0L, HIGHEST_IP + 1);
		String ip = AddressUtils.longToIpv4(expected);
		assertEquals(AddressUtils.ipv4ToLong(ip), expected);
	}

	// (lower IP, higher IP, expected smallest CIDR containing both).
	@DataProvider
	public Object[][] toCIDRDataProvider() {
		Object[][] values = { { "127.0.0.5", "127.0.0.55", "127.0.0.0/26" }, { "127.0.0.32", "127.0.0.55", "127.0.0.0/26" },
				{ "127.0.0.50", "127.0.0.255", "127.0.0.0/24" }, { "127.0.0.50", "127.0.1.50", "127.0.0.0/23" },
				{ "127.0.4.50", "127.0.32.255", "127.0.0.0/18" }, { "127.0.4.50", "127.0.31.255", "127.0.0.0/19" },
				{ "127.0.4.50", "127.0.8.255", "127.0.0.0/20" }, { "255.255.255.255", "255.255.255.255", "255.255.255.255/32" }, };
		return values;
	}

	// toCIDR is asserted in both argument orders, i.e. it must be symmetric.
	@Test(dataProvider = "toCIDRDataProvider")
	public void toCIDR(String ipAsText, String ipBsText, String expected) {
		long ipA = AddressUtils.ipv4ToLong(ipAsText);
		long ipB = AddressUtils.ipv4ToLong(ipBsText);
		assertEquals(AddressUtils.toCIDR(ipA, ipB), expected);
		assertEquals(AddressUtils.toCIDR(ipB, ipA), expected);
	}

	// (CIDR block, expected number of addresses = 2^(32 - prefix length)).
	@DataProvider
	public Object[][] numberOfAddressDataProvider() {
		Object[][] values = { { "127.0.0.0/32", 1 }, { "127.0.0.0/31", 2 }, { "127.0.0.0/30", 4 }, { "127.0.0.0/19", 8192 } };
		return values;
	}

	@Test(dataProvider = "numberOfAddressDataProvider")
	public void numberOfAddress(String cidrAddress, int expected) {
		assertEquals(AddressUtils.numberOfAddress(cidrAddress), expected);
	}
}
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 */
package com.microsoft.azure.management.batchai.v2018_03_01;

import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Details of the Azure File Share to mount on the cluster.
 *
 * AutoRest-generated model class: plain fields with property-named getters and
 * fluent {@code with*} setters that return {@code this} for chaining.
 */
public class AzureFileShareReference {
    /**
     * Name of the storage account. Required.
     */
    @JsonProperty(value = "accountName", required = true)
    private String accountName;

    /**
     * URL to access the Azure File. Required.
     */
    @JsonProperty(value = "azureFileUrl", required = true)
    private String azureFileUrl;

    /**
     * Information of the Azure File credentials. Required.
     */
    @JsonProperty(value = "credentials", required = true)
    private AzureStorageCredentialsInfo credentials;

    /**
     * Specifies the relative path on the compute node where the Azure file
     * share will be mounted. Required.
     * Note that all cluster level file shares will be mounted under
     * $AZ_BATCHAI_MOUNT_ROOT location and all job level file shares will be
     * mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT.
     */
    @JsonProperty(value = "relativeMountPath", required = true)
    private String relativeMountPath;

    /**
     * Specifies the file mode. Optional.
     * Default value is 0777. Valid only if OS is linux.
     */
    @JsonProperty(value = "fileMode")
    private String fileMode;

    /**
     * Specifies the directory Mode. Optional.
     * Default value is 0777. Valid only if OS is linux.
     */
    @JsonProperty(value = "directoryMode")
    private String directoryMode;

    /**
     * Get the accountName value.
     *
     * @return the accountName value
     */
    public String accountName() {
        return this.accountName;
    }

    /**
     * Set the accountName value.
     *
     * @param accountName the accountName value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withAccountName(String accountName) {
        this.accountName = accountName;
        return this;
    }

    /**
     * Get the azureFileUrl value.
     *
     * @return the azureFileUrl value
     */
    public String azureFileUrl() {
        return this.azureFileUrl;
    }

    /**
     * Set the azureFileUrl value.
     *
     * @param azureFileUrl the azureFileUrl value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withAzureFileUrl(String azureFileUrl) {
        this.azureFileUrl = azureFileUrl;
        return this;
    }

    /**
     * Get the credentials value.
     *
     * @return the credentials value
     */
    public AzureStorageCredentialsInfo credentials() {
        return this.credentials;
    }

    /**
     * Set the credentials value.
     *
     * @param credentials the credentials value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withCredentials(AzureStorageCredentialsInfo credentials) {
        this.credentials = credentials;
        return this;
    }

    /**
     * Get the relativeMountPath value.
     *
     * @return the relativeMountPath value
     */
    public String relativeMountPath() {
        return this.relativeMountPath;
    }

    /**
     * Set the relativeMountPath value.
     *
     * @param relativeMountPath the relativeMountPath value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withRelativeMountPath(String relativeMountPath) {
        this.relativeMountPath = relativeMountPath;
        return this;
    }

    /**
     * Get the fileMode value.
     *
     * @return the fileMode value
     */
    public String fileMode() {
        return this.fileMode;
    }

    /**
     * Set the fileMode value.
     *
     * @param fileMode the fileMode value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withFileMode(String fileMode) {
        this.fileMode = fileMode;
        return this;
    }

    /**
     * Get the directoryMode value.
     *
     * @return the directoryMode value
     */
    public String directoryMode() {
        return this.directoryMode;
    }

    /**
     * Set the directoryMode value.
     *
     * @param directoryMode the directoryMode value to set
     * @return the AzureFileShareReference object itself.
     */
    public AzureFileShareReference withDirectoryMode(String directoryMode) {
        this.directoryMode = directoryMode;
        return this;
    }
}
package com.ualberta.nyitrai.nyitrai_sizebook;

import android.widget.ArrayAdapter;

import java.util.ArrayList;

/**
 * Created by nyitrai on 2/4/2017.
 *
 * Model holding the list of {@code Record}s; the mutators call
 * {@code notifyViews()} (inherited from SModel) so observing views refresh.
 */
public class SizeBook extends SModel<SView> {

    // Backing list of records; initialized in the constructor (see bug fix there).
    protected ArrayList<Record> records;

    public ArrayList<Record> getRecords() {
        return records;
    }

    public void setRecords(ArrayList<Record> newRecords) {
        this.records = newRecords;
    }

    /** Adds a record and notifies observing views. */
    public void newRecord(Record record) {
        records.add(record);
        notifyViews();
    }

    /** Removes a record and notifies observing views. */
    public void deleteRecord(Record record) {
        records.remove(record);
        notifyViews();
    }

    SizeBook() {
        super();
        // Bug fix: 'records' was never initialized anywhere, so newRecord()
        // and deleteRecord() threw NullPointerException on a freshly
        // constructed SizeBook. Start with an empty list instead.
        this.records = new ArrayList<Record>();
    }
}
package edu.anadolu;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

import java.io.IOException;
import java.lang.reflect.Array;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Map;

/**
 * JSON-serializable view of a multi-depot routing solution: one entry per
 * depot, each route rendered as a space-separated list of stops.
 */
public class Solution {

    ArrayList<Depots> solution = new ArrayList<>();

    public Solution(ArrayList<Depots> solution) {
        this.solution = solution;
    }

    /** One depot together with the routes assigned to it. */
    static class Depots {
        String depot;
        ArrayList<String> routes;

        public Depots(String depot, ArrayList<String> routes) {
            this.depot = depot;
            this.routes = routes;
        }
    }

    /**
     * Writes the solution as pretty-printed JSON to
     * {@code solution_d<depotNumber>s<salesNummber>.json} and echoes it to stdout.
     *
     * @param moveOperations source of the depot -&gt; routes mapping
     * @param depotNumber    number of depots (only used in the output file name)
     * @param salesNummber   number of salesmen (only used in the output file name)
     * @param verbose        when true, numeric indices are replaced by city
     *                       names from {@code TurkishNetwork.cities}
     * @throws IOException if the JSON file cannot be written
     */
    public static void solutionToJson(MoveOperations moveOperations, int depotNumber, int salesNummber,
                                      boolean verbose) throws IOException {
        GsonBuilder builder = new GsonBuilder().setPrettyPrinting();
        Gson gson = builder.create();
        ArrayList<Solution.Depots> depots = new ArrayList<Solution.Depots>();
        for (Map.Entry<Integer, ArrayList<ArrayList<Integer>>> a : moveOperations.getDepots().entrySet()) {
            String depot;
            if (!verbose) {
                depot = Integer.toString(a.getKey());
            } else {
                depot = TurkishNetwork.cities[a.getKey()];
            }
            ArrayList<String> stringRoutes = new ArrayList<>();
            // Improvement: the original built each route with repeated String
            // concatenation (O(n^2)); a StringBuilder produces the identical
            // space-separated text in linear time.
            for (ArrayList<Integer> stops : a.getValue()) {
                StringBuilder route = new StringBuilder();
                for (int j = 0; j < stops.size(); j++) {
                    if (j > 0) {
                        route.append(' ');
                    }
                    if (!verbose) {
                        route.append(stops.get(j));
                    } else {
                        route.append(TurkishNetwork.cities[stops.get(j)]);
                    }
                }
                stringRoutes.add(route.toString());
            }
            depots.add(new Solution.Depots(depot, stringRoutes));
        }
        Solution solution = new Solution(depots);
        String jsonPath = "solution_" + "d" + depotNumber + "s" + salesNummber + ".json";
        byte[] jsonBytes = gson.toJson(solution).getBytes();
        Files.write(Paths.get(jsonPath), jsonBytes);
        System.out.println(gson.toJson(solution));
    }
}
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.datanode; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.ipc.RemoteException; 
import org.apache.hadoop.util.Daemon;
import org.slf4j.Logger;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BLOCK_GROUP_INDEX_MASK;
import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;

/**
 * This class handles the block recovery work commands.
 */
@InterfaceAudience.Private
public class BlockRecoveryWorker {
  public static final Logger LOG = DataNode.LOG;

  private final DataNode datanode;
  private final Configuration conf;
  private final DNConf dnConf;

  BlockRecoveryWorker(DataNode datanode) {
    this.datanode = datanode;
    conf = datanode.getConf();
    dnConf = datanode.getDnConf();
  }

  /** A convenient class used in block recovery. */
  static class BlockRecord {
    private final DatanodeID id;
    private final InterDatanodeProtocol datanode;
    private final ReplicaRecoveryInfo rInfo;

    // Storage reported back by the DataNode; populated only after
    // updateReplicaUnderRecovery() has succeeded.
    private String storageID;

    BlockRecord(DatanodeID id, InterDatanodeProtocol datanode,
        ReplicaRecoveryInfo rInfo) {
      this.id = id;
      this.datanode = datanode;
      this.rInfo = rInfo;
    }

    // Asks the owning DataNode to finalize this replica's recovery and
    // remembers which storage it landed on (used later when notifying the NN).
    private void updateReplicaUnderRecovery(String bpid, long recoveryId,
        long newBlockId, long newLength) throws IOException {
      final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
      storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, newBlockId,
          newLength);
    }

    public ReplicaRecoveryInfo getReplicaRecoveryInfo(){
      return rInfo;
    }

    @Override
    public String toString() {
      return "block:" + rInfo + " node:" + id;
    }
  }

  /** A block recovery task for a contiguous block.
   */
  class RecoveryTaskContiguous {
    private final RecoveringBlock rBlock;
    private final ExtendedBlock block;
    private final String bpid;
    private final DatanodeInfo[] locs;
    private final long recoveryId;

    RecoveryTaskContiguous(RecoveringBlock rBlock) {
      this.rBlock = rBlock;
      block = rBlock.getBlock();
      bpid = block.getBlockPoolId();
      locs = rBlock.getLocations();
      recoveryId = rBlock.getNewGenerationStamp();
    }

    // Collects recovery info from every replica location, filters out
    // unusable replicas, then delegates to syncBlock() to pick the final
    // length and commit.
    protected void recover() throws IOException {
      List<BlockRecord> syncList = new ArrayList<>(locs.length);
      int errorCount = 0;
      int candidateReplicaCnt = 0;

      // Check generation stamps, replica size and state. Replica must satisfy
      // the following criteria to be included in syncList for recovery:
      // - Valid generation stamp
      // - Non-zero length
      // - Original state is RWR or better
      for(DatanodeID id : locs) {
        try {
          // Talk to the replica's DataNode directly; skip the RPC proxy when
          // the replica lives on this very node.
          DatanodeID bpReg = new DatanodeID(
              datanode.getBPOfferService(bpid).bpRegistration);
          InterDatanodeProtocol proxyDN = bpReg.equals(id)?
              datanode: DataNode.createInterDataNodeProtocolProxy(id, conf,
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
          ReplicaRecoveryInfo info = callInitReplicaRecovery(proxyDN, rBlock);
          if (info != null &&
              info.getGenerationStamp() >= block.getGenerationStamp() &&
              info.getNumBytes() > 0) {
            // Count the number of candidate replicas received.
            ++candidateReplicaCnt;
            if (info.getOriginalReplicaState().getValue() <=
                ReplicaState.RWR.getValue()) {
              syncList.add(new BlockRecord(id, proxyDN, info));
            } else {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Block recovery: Ignored replica with invalid "
                    + "original state: " + info + " from DataNode: " + id);
              }
            }
          } else {
            if (LOG.isDebugEnabled()) {
              if (info == null) {
                LOG.debug("Block recovery: DataNode: " + id + " does not have "
                    + "replica for block: " + block);
              } else {
                LOG.debug("Block recovery: Ignored replica with invalid "
                    + "generation stamp or length: " + info + " from "
                    + "DataNode: " + id);
              }
            }
          }
        } catch (RecoveryInProgressException ripE) {
          // Another recovery with a newer id owns this replica; abort ours.
          InterDatanodeProtocol.LOG.warn(
              "Recovery for replica " + block + " on data-node " + id
                  + " is already in progress. Recovery id = "
                  + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
          return;
        } catch (IOException e) {
          ++errorCount;
          InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
              + block + ", datanode=" + id + ")", e);
        }
      }

      if (errorCount == locs.length) {
        throw new IOException("All datanodes failed: block=" + block
            + ", datanodeids=" + Arrays.asList(locs));
      }

      // None of the replicas reported by DataNodes has the required original
      // state, report the error.
      if (candidateReplicaCnt > 0 && syncList.isEmpty()) {
        throw new IOException("Found " + candidateReplicaCnt
            + " replica(s) for block " + block + " but none is in "
            + ReplicaState.RWR.name() + " or better state. datanodeids="
            + Arrays.asList(locs));
      }

      syncBlock(syncList);
    }

    /** Block synchronization. */
    void syncBlock(List<BlockRecord> syncList) throws IOException {
      DatanodeProtocolClientSideTranslatorPB nn =
          getActiveNamenodeForBP(block.getBlockPoolId());

      boolean isTruncateRecovery = rBlock.getNewBlock() != null;
      long blockId = (isTruncateRecovery) ?
          rBlock.getNewBlock().getBlockId() : block.getBlockId();
      if (LOG.isDebugEnabled()) {
        LOG.debug("block=" + block + ", (length=" + block.getNumBytes()
            + "), syncList=" + syncList);
      }

      // syncList.isEmpty() means that all data-nodes do not have the block
      // or their replicas have 0 length.
      // The block can be deleted.
      if (syncList.isEmpty()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("syncBlock for block " + block + ", all datanodes don't "
              + "have the block or their replicas have 0 length. The block can "
              + "be deleted.");
        }
        nn.commitBlockSynchronization(block, recoveryId, 0,
            true, true, DatanodeID.EMPTY_ARRAY, null);
        return;
      }

      // Calculate the best available replica state.
      // (ReplicaState values are ordered: smaller value = better state.)
      ReplicaState bestState = ReplicaState.RWR;
      long finalizedLength = -1;
      for (BlockRecord r : syncList) {
        assert r.rInfo.getNumBytes() > 0 : "zero length replica";
        ReplicaState rState = r.rInfo.getOriginalReplicaState();
        if (rState.getValue() < bestState.getValue()) {
          bestState = rState;
        }
        if(rState == ReplicaState.FINALIZED) {
          // All finalized replicas must agree on the length.
          if (finalizedLength > 0 && finalizedLength != r.rInfo.getNumBytes()) {
            throw new IOException("Inconsistent size of finalized replicas. " +
                "Replica " + r.rInfo + " expected size: " + finalizedLength);
          }
          finalizedLength = r.rInfo.getNumBytes();
        }
      }

      // Calculate list of nodes that will participate in the recovery
      // and the new block size
      List<BlockRecord> participatingList = new ArrayList<>();
      final ExtendedBlock newBlock = new ExtendedBlock(bpid, blockId,
          -1, recoveryId);
      switch(bestState) {
      case FINALIZED:
        assert finalizedLength > 0 : "finalizedLength is not positive";
        for(BlockRecord r : syncList) {
          ReplicaState rState = r.rInfo.getOriginalReplicaState();
          // An RBW replica may participate only if it already reached the
          // finalized length.
          if (rState == ReplicaState.FINALIZED ||
              rState == ReplicaState.RBW &&
                  r.rInfo.getNumBytes() == finalizedLength) {
            participatingList.add(r);
          }
          if (LOG.isDebugEnabled()) {
            LOG.debug("syncBlock replicaInfo: block=" + block
                + ", from datanode " + r.id + ", receivedState=" + rState.name()
                + ", receivedLength=" + r.rInfo.getNumBytes()
                + ", bestState=FINALIZED, finalizedLength=" + finalizedLength);
          }
        }
        newBlock.setNumBytes(finalizedLength);
        break;
      case RBW:
      case RWR:
        // No finalized replica: truncate everyone to the shortest replica in
        // the best state.
        long minLength = Long.MAX_VALUE;
        for(BlockRecord r : syncList) {
          ReplicaState rState = r.rInfo.getOriginalReplicaState();
          if(rState == bestState) {
            minLength = Math.min(minLength, r.rInfo.getNumBytes());
            participatingList.add(r);
          }
          if (LOG.isDebugEnabled()) {
            LOG.debug("syncBlock replicaInfo: block=" + block
                + ", from datanode " + r.id + ", receivedState=" + rState.name()
                + ", receivedLength=" + r.rInfo.getNumBytes() + ", bestState="
                + bestState.name());
          }
        }
        // recover() guarantees syncList will have at least one replica with RWR
        // or better state.
        assert minLength != Long.MAX_VALUE : "wrong minLength";
        newBlock.setNumBytes(minLength);
        break;
      case RUR:
      case TEMPORARY:
        assert false : "bad replica state: " + bestState;
      default:
        break; // we have 'case' all enum values
      }
      if (isTruncateRecovery) {
        newBlock.setNumBytes(rBlock.getNewBlock().getNumBytes());
      }

      List<DatanodeID> failedList = new ArrayList<>();
      final List<BlockRecord> successList = new ArrayList<>();
      for (BlockRecord r : participatingList) {
        try {
          r.updateReplicaUnderRecovery(bpid, recoveryId, blockId,
              newBlock.getNumBytes());
          successList.add(r);
        } catch (IOException e) {
          InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock="
              + newBlock + ", datanode=" + r.id + ")", e);
          failedList.add(r.id);
        }
      }

      // If any of the data-nodes failed, the recovery fails, because
      // we never know the actual state of the replica on failed data-nodes.
      // The recovery should be started over.
      if (!failedList.isEmpty()) {
        throw new IOException("Cannot recover " + block
            + ", the following datanodes failed: " + failedList);
      }

      // Notify the name-node about successfully recovered replicas.
      final DatanodeID[] datanodes = new DatanodeID[successList.size()];
      final String[] storages = new String[datanodes.length];
      for (int i = 0; i < datanodes.length; i++) {
        final BlockRecord r = successList.get(i);
        datanodes[i] = r.id;
        storages[i] = r.storageID;
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Datanode triggering commitBlockSynchronization, block="
            + block + ", newGs=" + newBlock.getGenerationStamp()
            + ", newLength=" + newBlock.getNumBytes());
      }
      nn.commitBlockSynchronization(block,
          newBlock.getGenerationStamp(), newBlock.getNumBytes(), true, false,
          datanodes, storages);
    }
  }

  /**
   * blk_0  blk_1  blk_2  blk_3  blk_4  blk_5  blk_6  blk_7  blk_8
   *  64k    64k    64k    64k    64k    64k    64k    64k    64k  <-- stripe_0
   *  64k    64k    64k    64k    64k    64k    64k    64k    64k
   *  64k    64k    64k    64k    64k    64k    64k    61k  <-- startStripeIdx
   *  64k    64k    64k    64k    64k    64k    64k
   *  64k    64k    64k    64k    64k    64k    59k
   *  64k    64k    64k    64k    64k    64k
   *  64k    64k    64k    64k    64k    64k          <-- last full stripe
   *  64k    64k    13k    64k    55k     3k  <-- target last stripe
   *  64k    64k           64k     1k
   *  64k    64k           58k
   *  64k    64k
   *  64k    19k
   *  64k  <-- total visible stripe
   *
   * Due to different speed of streamers, the internal blocks in a block group
   * could have different lengths when the block group isn't ended normally.
   * The purpose of this class is to recover the UnderConstruction block group,
   * so all internal blocks end at the same stripe.
   *
   * The steps:
   * 1. get all blocks lengths from DataNodes.
   * 2. calculate safe length, which is at the target last stripe.
   * 3. decode and feed blk_6~8, make them end at last full stripe. (the last
   * full stripe means the last decodable stripe.)
   * 4. encode the target last stripe, with the remaining sequential data. In
   * this case, the sequential data is 64k+64k+13k. Feed blk_6~8 the parity
   * cells. Overwrite the parity cell if have to.
   * 5. truncate the stripes from visible stripe, to target last stripe.
   * TODO: implement step 3,4
   */
  public class RecoveryTaskStriped {
    private final RecoveringBlock rBlock;
    private final ExtendedBlock block;
    private final String bpid;
    private final DatanodeInfo[] locs;
    private final long recoveryId;

    private final byte[] blockIndices;
    private final ErasureCodingPolicy ecPolicy;

    RecoveryTaskStriped(RecoveringStripedBlock rBlock) {
      this.rBlock = rBlock;
      // TODO: support truncate
      Preconditions.checkArgument(rBlock.getNewBlock() == null);

      block = rBlock.getBlock();
      bpid = block.getBlockPoolId();
      locs = rBlock.getLocations();
      recoveryId = rBlock.getNewGenerationStamp();
      blockIndices = rBlock.getBlockIndices();
      ecPolicy = rBlock.getErasureCodingPolicy();
    }

    // Gathers per-internal-block recovery info, computes the safe (decodable)
    // length of the block group, then truncates the surviving internal blocks
    // to that length and commits the result to the NameNode.
    protected void recover() throws IOException {
      checkLocations(locs.length);

      Map<Long, BlockRecord> syncBlocks = new HashMap<>(locs.length);
      final int dataBlkNum = ecPolicy.getNumDataUnits();
      final int totalBlkNum = dataBlkNum + ecPolicy.getNumParityUnits();
      //check generation stamps
      for (int i = 0; i < locs.length; i++) {
        DatanodeID id = locs[i];
        try {
          DatanodeID bpReg = new DatanodeID(
              datanode.getBPOfferService(bpid).bpRegistration);
          InterDatanodeProtocol proxyDN = bpReg.equals(id) ?
              datanode : DataNode.createInterDataNodeProtocolProxy(id, conf,
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
          // Internal block id = block-group id + this location's block index.
          ExtendedBlock internalBlk = new ExtendedBlock(block);
          final long blockId = block.getBlockId() + blockIndices[i];
          internalBlk.setBlockId(blockId);
          ReplicaRecoveryInfo info = callInitReplicaRecovery(proxyDN,
              new RecoveringBlock(internalBlk, null, recoveryId));

          if (info != null &&
              info.getGenerationStamp() >= block.getGenerationStamp() &&
              info.getNumBytes() > 0) {
            final BlockRecord existing = syncBlocks.get(blockId);
            if (existing == null ||
                info.getNumBytes() > existing.rInfo.getNumBytes()) {
              // if we have >1 replicas for the same internal block, we
              // simply choose the one with larger length.
              // TODO: better usage of redundant replicas
              syncBlocks.put(blockId, new BlockRecord(id, proxyDN, info));
            }
          }
        } catch (RecoveryInProgressException ripE) {
          InterDatanodeProtocol.LOG.warn(
              "Recovery for replica " + block + " on data-node " + id
                  + " is already in progress. Recovery id = "
                  + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
          return;
        } catch (IOException e) {
          InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
              + block + ", datanode=" + id + ")", e);
        }
      }
      // Need at least numDataUnits distinct internal blocks to decode.
      checkLocations(syncBlocks.size());

      final long safeLength = getSafeLength(syncBlocks);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Recovering block " + block
            + ", length=" + block.getNumBytes() + ", safeLength=" + safeLength
            + ", syncList=" + syncBlocks);
      }

      // If some internal blocks reach the safe length, convert them to RUR
      List<BlockRecord> rurList = new ArrayList<>(locs.length);
      for (BlockRecord r : syncBlocks.values()) {
        int blockIndex = (int) (r.rInfo.getBlockId() & BLOCK_GROUP_INDEX_MASK);
        long newSize = getInternalBlockLength(safeLength, ecPolicy.getCellSize(),
            dataBlkNum, blockIndex);
        if (r.rInfo.getNumBytes() >= newSize) {
          rurList.add(r);
        }
      }
      assert rurList.size() >= dataBlkNum : "incorrect safe length";

      // Recovery the striped block by truncating internal blocks to the safe
      // length. Abort if there is any failure in this step.
      truncatePartialBlock(rurList, safeLength);

      // notify Namenode the new size and locations
      final DatanodeID[] newLocs = new DatanodeID[totalBlkNum];
      final String[] newStorages = new String[totalBlkNum];
      // NOTE(review): this loop indexes blockIndices with i up to totalBlkNum,
      // but blockIndices has one entry per reported location (locs.length) —
      // confirm locs always covers all internal blocks, otherwise this can
      // throw ArrayIndexOutOfBoundsException.
      for (int i = 0; i < totalBlkNum; i++) {
        newLocs[blockIndices[i]] = DatanodeID.EMPTY_DATANODE_ID;
        newStorages[blockIndices[i]] = "";
      }
      for (BlockRecord r : rurList) {
        int index = (int) (r.rInfo.getBlockId() &
            HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
        newLocs[index] = r.id;
        newStorages[index] = r.storageID;
      }
      ExtendedBlock newBlock = new ExtendedBlock(bpid, block.getBlockId(),
          safeLength, recoveryId);
      DatanodeProtocolClientSideTranslatorPB nn =
          getActiveNamenodeForBP(bpid);
      nn.commitBlockSynchronization(block, newBlock.getGenerationStamp(),
          newBlock.getNumBytes(), true, false, newLocs, newStorages);
    }

    // Truncates each surviving internal block to its share of safeLength;
    // any single failure aborts the whole recovery.
    private void truncatePartialBlock(List<BlockRecord> rurList,
        long safeLength) throws IOException {
      int cellSize = ecPolicy.getCellSize();
      int dataBlkNum = ecPolicy.getNumDataUnits();
      List<DatanodeID> failedList = new ArrayList<>();
      for (BlockRecord r : rurList) {
        int blockIndex = (int) (r.rInfo.getBlockId() & BLOCK_GROUP_INDEX_MASK);
        long newSize = getInternalBlockLength(safeLength, cellSize, dataBlkNum,
            blockIndex);
        try {
          r.updateReplicaUnderRecovery(bpid, recoveryId, r.rInfo.getBlockId(),
              newSize);
        } catch (IOException e) {
          // NOTE(review): this message is missing the block value after
          // "newblock=" — the concatenation drops it ("(newblock=" + ", ...").
          InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock="
              + ", datanode=" + r.id + ")", e);
          failedList.add(r.id);
        }
      }

      // If any of the data-nodes failed, the recovery fails, because
      // we never know the actual state of the replica on failed data-nodes.
      // The recovery should be started over.
      if (!failedList.isEmpty()) {
        throw new IOException("Cannot recover " + block
            + ", the following datanodes failed: " + failedList);
      }
    }

    /**
     * TODO: the current implementation depends on the assumption that the
     * parity cells are only generated based on the full stripe. This is not
     * true after we support hflush.
     */
    @VisibleForTesting
    long getSafeLength(Map<Long, BlockRecord> syncBlocks) {
      final int dataBlkNum = ecPolicy.getNumDataUnits();
      Preconditions.checkArgument(syncBlocks.size() >= dataBlkNum);
      // Collect the reported length of every surviving internal block and
      // let StripedBlockUtil derive the longest decodable group length.
      long[] blockLengths = new long[syncBlocks.size()];
      int i = 0;
      for (BlockRecord r : syncBlocks.values()) {
        ReplicaRecoveryInfo rInfo = r.getReplicaRecoveryInfo();
        blockLengths[i++] = rInfo.getNumBytes();
      }
      return StripedBlockUtil.getSafeLength(ecPolicy, blockLengths);
    }

    // Fails fast when fewer than numDataUnits internal blocks are available —
    // below that the group cannot be decoded at all.
    private void checkLocations(int locationCount)
        throws IOException {
      if (locationCount < ecPolicy.getNumDataUnits()) {
        throw new IOException(block + " has no enough internal blocks"
            + ", unable to start recovery. Locations=" + Arrays.asList(locs));
      }
    }
  }

  private static void logRecoverBlock(String who, RecoveringBlock rb) {
    ExtendedBlock block = rb.getBlock();
    DatanodeInfo[] targets = rb.getLocations();

    LOG.info(who + " calls recoverBlock(" + block
        + ", targets=[" + Joiner.on(", ").join(targets) + "]"
        + ", newGenerationStamp=" + rb.getNewGenerationStamp()
        + ", newBlock=" + rb.getNewBlock()
        + ", isStriped=" + rb.isStriped()
        + ")");
  }

  /**
   * Convenience method, which unwraps RemoteException.
   * @throws IOException not a RemoteException.
   */
  private static ReplicaRecoveryInfo callInitReplicaRecovery(
      InterDatanodeProtocol datanode,
      RecoveringBlock rBlock) throws IOException {
    try {
      return datanode.initReplicaRecovery(rBlock);
    } catch(RemoteException re) {
      throw re.unwrapRemoteException();
    }
  }

  /**
   * Get the NameNode corresponding to the given block pool.
   *
   * @param bpid Block pool Id
   * @return Namenode corresponding to the bpid
   * @throws IOException if unable to get the corresponding NameNode
   */
  DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(
      String bpid) throws IOException {
    BPOfferService bpos = datanode.getBPOfferService(bpid);
    if (bpos == null) {
      throw new IOException("No block pool offer service for bpid=" + bpid);
    }

    DatanodeProtocolClientSideTranslatorPB activeNN = bpos.getActiveNN();
    if (activeNN == null) {
      throw new IOException(
          "Block pool " + bpid + " has not recognized an active NN");
    }
    return activeNN;
  }

  // Runs all the given recovery tasks sequentially on one background daemon
  // thread; per-block failures are logged and do not stop the remaining tasks.
  public Daemon recoverBlocks(final String who,
      final Collection<RecoveringBlock> blocks) {
    Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
      @Override
      public void run() {
        for(RecoveringBlock b : blocks) {
          try {
            logRecoverBlock(who, b);
            if (b.isStriped()) {
              new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
            } else {
              new RecoveryTaskContiguous(b).recover();
            }
          } catch (IOException e) {
            LOG.warn("recoverBlocks FAILED: " + b, e);
          }
        }
      }
    });
    d.start();
    return d;
  }
}
package com.ssepulveda.commons; public class Converters { public static String byteArrayToHexString(byte[] a) { StringBuilder sb = new StringBuilder(a.length * 2); for(byte b: a) sb.append(String.format("%02x", b)); return sb.toString(); } }
// // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference // Implementation, v2.2.7 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2020.10.05 at 10:58:10 AM IST // package org.hl7.v3; import javax.xml.bind.annotation.XmlEnum; import javax.xml.bind.annotation.XmlType; /** * Java class for hasSupport. * * <p>The following schema fragment specifies the expected content contained within this class. * * <p> * * <pre> * &lt;simpleType name="hasSupport"> * &lt;restriction base="{urn:hl7-org:v3}cs"> * &lt;enumeration value="SPRT"/> * &lt;enumeration value="SPRTBND"/> * &lt;/restriction> * &lt;/simpleType> * </pre> */ @XmlType(name = "hasSupport") @XmlEnum public enum HasSupport { SPRT, SPRTBND; public String value() { return name(); } public static HasSupport fromValue(String v) { return valueOf(v); } }
package net.synerghetic.synerg.activity; import android.support.design.widget.TabLayout; import android.support.design.widget.FloatingActionButton; import android.support.design.widget.Snackbar; import android.support.v7.app.AppCompatActivity; import android.support.v7.widget.Toolbar; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentManager; import android.support.v4.app.FragmentPagerAdapter; import android.support.v4.view.ViewPager; import android.os.Bundle; import android.view.Menu; import android.view.MenuItem; import android.view.View; import net.synerghetic.synerg.R; import net.synerghetic.synerg.fragments.Tab1Bureau; import net.synerghetic.synerg.fragments.Tab2Activite; import net.synerghetic.synerg.fragments.Tab3Apropos; import net.synerghetic.synerg.model.Instagram; import net.synerghetic.synerg.network.InstagramService; import java.util.List; public class MainActivity extends AppCompatActivity { /** * The {@link android.support.v4.view.PagerAdapter} that will provide * fragments for each of the sections. We use a * {@link FragmentPagerAdapter} derivative, which will keep every * loaded fragment in memory. If this becomes too memory intensive, it * may be best to switch to a * {@link android.support.v4.app.FragmentStatePagerAdapter}. */ private SectionsPagerAdapter mSectionsPagerAdapter; /** * The {@link ViewPager} that will host the section contents. */ private ViewPager mViewPager; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar); setSupportActionBar(toolbar); // Create the adapter that will return a fragment for each of the three // primary sections of the activity. mSectionsPagerAdapter = new SectionsPagerAdapter(getSupportFragmentManager()); // Set up the ViewPager with the sections adapter. 
mViewPager = (ViewPager) findViewById(R.id.container); mViewPager.setAdapter(mSectionsPagerAdapter); TabLayout tabLayout = (TabLayout) findViewById(R.id.tabs); tabLayout.setupWithViewPager(mViewPager); FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.fab); fab.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Snackbar.make(view, "Replace with your own action", Snackbar.LENGTH_LONG) .setAction("Action", null).show(); } }); } @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.menu_main, menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { // Handle action bar item clicks here. The action bar will // automatically handle clicks on the Home/Up button, so long // as you specify a parent activity in AndroidManifest.xml. int id = item.getItemId(); //noinspection SimplifiableIfStatement if (id == R.id.action_settings) { return true; } return super.onOptionsItemSelected(item); } /** * A placeholder fragment containing a simple view. 
*/ /**public static class PlaceholderFragment extends Fragment { private static final String ARG_SECTION_NUMBER = "section_number"; public PlaceholderFragment() { } public static PlaceholderFragment newInstance(int sectionNumber) { PlaceholderFragment fragment = new PlaceholderFragment(); Bundle args = new Bundle(); args.putInt(ARG_SECTION_NUMBER, sectionNumber); fragment.setArguments(args); return fragment; } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View rootView = inflater.inflate(R.layout.tab1bureau, container, false); TextView textView = (TextView) rootView.findViewById(R.id.section_label); textView.setText(getString(R.string.section_format, getArguments().getInt(ARG_SECTION_NUMBER))); return rootView; } }**/ /** * A {@link FragmentPagerAdapter} that returns a fragment corresponding to * one of the sections/tabs/pages. */ public class SectionsPagerAdapter extends FragmentPagerAdapter { public SectionsPagerAdapter(FragmentManager fm) { super(fm); } @Override public Fragment getItem(int position) { // getItem is called to instantiate the fragment for the given page. // Return a PlaceholderFragment (defined as a static inner class below). //return PlaceholderFragment.newInstance(position + 1); switch (position) { case 0: Tab1Bureau tab1 = new Tab1Bureau(); return tab1; case 1: Tab2Activite tab2 = new Tab2Activite(); return tab2; case 2: Tab3Apropos tab3 = new Tab3Apropos(); return tab3; default: return null; } } @Override public int getCount() { // Show 3 total pages. return 3; } @Override public CharSequence getPageTitle(int position) { switch (position) { case 0: return "BUREAU"; case 1: return "ACTIVITÉ"; case 2: return "À PROPOS"; } return null; } } }
/* AUTO-GENERATED FILE. DO NOT MODIFY. * * This class was automatically generated by the * aapt tool from the resource data it found. It * should not be modified by hand. */ package com.example.android.xmladapters; public final class R { public static final class attr { /** The type of binding. If this value is not specified, the type will be inferred from the type of the "to" target view. Mandatory. The type can be one of: <ul> <li>string, The content of the column is interpreted as a string.</li> <li>image, The content of the column is interpreted as a blob describing an image.</li> <li>image-uri, The content of the column is interpreted as a URI to an image.</li> <li>drawable, The content of the column is interpreted as a resource id to a drawable.</li> <li>A fully qualified class name, corresponding to an implementation of android.widget.Adapters.CursorBinder.</li> </ul> <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int as=0x7f010006; /** The name of the column to select. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int column=0x7f010007; /** The name of the column to bind from. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. 
<p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int from=0x7f010004; /** The original value from the column. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int fromValue=0x7f010008; /** Layout resource used to display each row from the cursor. Mandatory. <p>Must be a reference to another resource, in the form "<code>@[+][<i>package</i>:]<i>type</i>:<i>name</i></code>" or to a theme attribute in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>". */ public static final int layout=0x7f010003; /** Selection statement for the query. Optional. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int selection=0x7f010001; /** Sort order statement for the query. Optional. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. 
*/ public static final int sortOrder=0x7f010002; /** The resource id of the view to bind to. Mandatory. <p>Must be a reference to another resource, in the form "<code>@[+][<i>package</i>:]<i>type</i>:<i>name</i></code>" or to a theme attribute in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>". */ public static final int to=0x7f010005; /** The new value from the column. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int toValue=0x7f010009; /** URI to get the cursor from. Optional. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int uri=0x7f010000; /** The transformation class, an implementation of android.widget.Adapters.CursorTransformation. Mandatory if "withExpression" is not specified. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int withClass=0x7f01000b; /** The transformation expression. Mandatory if "withClass" is not specified. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. 
<p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. */ public static final int withExpression=0x7f01000a; } public static final class drawable { public static final int ic_contact_picture=0x7f020000; } public static final class id { public static final int date=0x7f060005; public static final int description=0x7f060007; public static final int image=0x7f060006; public static final int item_layout=0x7f060004; public static final int name=0x7f060000; public static final int photo=0x7f060002; public static final int star=0x7f060001; public static final int title=0x7f060003; } public static final class layout { public static final int contact_item=0x7f030000; public static final int contacts_list=0x7f030001; public static final int photo_item=0x7f030002; public static final int photos_list=0x7f030003; public static final int rss_feed_item=0x7f030004; public static final int rss_feeds_list=0x7f030005; } public static final class string { public static final int app_name=0x7f050000; public static final int contacts_list_activity=0x7f050001; public static final int no_contacts=0x7f050004; public static final int no_photos=0x7f050005; public static final int no_rss_feed=0x7f050006; public static final int photos_list_activity=0x7f050002; public static final int rss_reader_activity=0x7f050003; } public static final class xml { public static final int contacts=0x7f040000; public static final int photos=0x7f040001; public static final int rss_feed=0x7f040002; } public static final class styleable { /** Adapter used to bind cursors. 
<p>Includes the following attributes:</p> <table> <colgroup align="left" /> <colgroup align="left" /> <tr><th>Attribute</th><th>Description</th></tr> <tr><td><code>{@link #CursorAdapter_layout com.example.android.xmladapters:layout}</code></td><td> Layout resource used to display each row from the cursor.</td></tr> <tr><td><code>{@link #CursorAdapter_selection com.example.android.xmladapters:selection}</code></td><td> Selection statement for the query.</td></tr> <tr><td><code>{@link #CursorAdapter_sortOrder com.example.android.xmladapters:sortOrder}</code></td><td> Sort order statement for the query.</td></tr> <tr><td><code>{@link #CursorAdapter_uri com.example.android.xmladapters:uri}</code></td><td> URI to get the cursor from.</td></tr> </table> @see #CursorAdapter_layout @see #CursorAdapter_selection @see #CursorAdapter_sortOrder @see #CursorAdapter_uri */ public static final int[] CursorAdapter = { 0x7f010000, 0x7f010001, 0x7f010002, 0x7f010003 }; /** <p> @attr description Layout resource used to display each row from the cursor. Mandatory. <p>Must be a reference to another resource, in the form "<code>@[+][<i>package</i>:]<i>type</i>:<i>name</i></code>" or to a theme attribute in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>". <p>This is a private symbol. @attr name android:layout */ public static final int CursorAdapter_layout = 3; /** <p> @attr description Selection statement for the query. Optional. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:selection */ public static final int CursorAdapter_selection = 1; /** <p> @attr description Sort order statement for the query. Optional. 
<p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:sortOrder */ public static final int CursorAdapter_sortOrder = 2; /** <p> @attr description URI to get the cursor from. Optional. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:uri */ public static final int CursorAdapter_uri = 0; /** Attributes used in bind items for XML cursor adapters. <p>Includes the following attributes:</p> <table> <colgroup align="left" /> <colgroup align="left" /> <tr><th>Attribute</th><th>Description</th></tr> <tr><td><code>{@link #CursorAdapter_BindItem_as com.example.android.xmladapters:as}</code></td><td> The type of binding.</td></tr> <tr><td><code>{@link #CursorAdapter_BindItem_from com.example.android.xmladapters:from}</code></td><td> The name of the column to bind from.</td></tr> <tr><td><code>{@link #CursorAdapter_BindItem_to com.example.android.xmladapters:to}</code></td><td> The resource id of the view to bind to.</td></tr> </table> @see #CursorAdapter_BindItem_as @see #CursorAdapter_BindItem_from @see #CursorAdapter_BindItem_to */ public static final int[] CursorAdapter_BindItem = { 0x7f010004, 0x7f010005, 0x7f010006 }; /** <p> @attr description The type of binding. If this value is not specified, the type will be inferred from the type of the "to" target view. Mandatory. 
The type can be one of: <ul> <li>string, The content of the column is interpreted as a string.</li> <li>image, The content of the column is interpreted as a blob describing an image.</li> <li>image-uri, The content of the column is interpreted as a URI to an image.</li> <li>drawable, The content of the column is interpreted as a resource id to a drawable.</li> <li>A fully qualified class name, corresponding to an implementation of android.widget.Adapters.CursorBinder.</li> </ul> <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:as */ public static final int CursorAdapter_BindItem_as = 2; /** <p> @attr description The name of the column to bind from. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:from */ public static final int CursorAdapter_BindItem_from = 0; /** <p> @attr description The resource id of the view to bind to. Mandatory. <p>Must be a reference to another resource, in the form "<code>@[+][<i>package</i>:]<i>type</i>:<i>name</i></code>" or to a theme attribute in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>". <p>This is a private symbol. @attr name android:to */ public static final int CursorAdapter_BindItem_to = 1; /** Attributes used to map values to new values in XML cursor adapters' bind items. 
<p>Includes the following attributes:</p> <table> <colgroup align="left" /> <colgroup align="left" /> <tr><th>Attribute</th><th>Description</th></tr> <tr><td><code>{@link #CursorAdapter_MapItem_fromValue com.example.android.xmladapters:fromValue}</code></td><td> The original value from the column.</td></tr> <tr><td><code>{@link #CursorAdapter_MapItem_toValue com.example.android.xmladapters:toValue}</code></td><td> The new value from the column.</td></tr> </table> @see #CursorAdapter_MapItem_fromValue @see #CursorAdapter_MapItem_toValue */ public static final int[] CursorAdapter_MapItem = { 0x7f010008, 0x7f010009 }; /** <p> @attr description The original value from the column. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:fromValue */ public static final int CursorAdapter_MapItem_fromValue = 0; /** <p> @attr description The new value from the column. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:toValue */ public static final int CursorAdapter_MapItem_toValue = 1; /** Attributes used in select items for XML cursor adapters. 
<p>Includes the following attributes:</p> <table> <colgroup align="left" /> <colgroup align="left" /> <tr><th>Attribute</th><th>Description</th></tr> <tr><td><code>{@link #CursorAdapter_SelectItem_column com.example.android.xmladapters:column}</code></td><td> The name of the column to select.</td></tr> </table> @see #CursorAdapter_SelectItem_column */ public static final int[] CursorAdapter_SelectItem = { 0x7f010007 }; /** <p> @attr description The name of the column to select. Mandatory. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:column */ public static final int CursorAdapter_SelectItem_column = 0; /** Attributes used to map values to new values in XML cursor adapters' bind items. <p>Includes the following attributes:</p> <table> <colgroup align="left" /> <colgroup align="left" /> <tr><th>Attribute</th><th>Description</th></tr> <tr><td><code>{@link #CursorAdapter_TransformItem_withClass com.example.android.xmladapters:withClass}</code></td><td> The transformation class, an implementation of android.</td></tr> <tr><td><code>{@link #CursorAdapter_TransformItem_withExpression com.example.android.xmladapters:withExpression}</code></td><td> The transformation expression.</td></tr> </table> @see #CursorAdapter_TransformItem_withClass @see #CursorAdapter_TransformItem_withExpression */ public static final int[] CursorAdapter_TransformItem = { 0x7f01000a, 0x7f01000b }; /** <p> @attr description The transformation class, an implementation of android.widget.Adapters.CursorTransformation. Mandatory if "withExpression" is not specified. 
<p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:withClass */ public static final int CursorAdapter_TransformItem_withClass = 1; /** <p> @attr description The transformation expression. Mandatory if "withClass" is not specified. <p>Must be a string value, using '\\;' to escape characters such as '\\n' or '\\uxxxx' for a unicode character. <p>This may also be a reference to a resource (in the form "<code>@[<i>package</i>:]<i>type</i>:<i>name</i></code>") or theme attribute (in the form "<code>?[<i>package</i>:][<i>type</i>:]<i>name</i></code>") containing a value of this type. <p>This is a private symbol. @attr name android:withExpression */ public static final int CursorAdapter_TransformItem_withExpression = 0; }; }
package com.example.doandidong.Data.KhachHang;

import java.io.Serializable;

/**
 * Data model for a customer group ("nhom khach hang"): the group name, the
 * customer code it belongs to, a free-form note, and a storage id.
 * <p>
 * Plain bean: no-arg constructor plus getters/setters for every field.
 */
public class NhomKhachHang implements Serializable {

    // Explicit UID so already-serialized instances stay readable when the
    // class evolves (original relied on the fragile compiler-generated one).
    private static final long serialVersionUID = 1L;

    String tenNhomKh;  // group name
    String maKH;       // customer code
    String ghichuNhom; // note attached to the group
    String id;         // storage id; may be null until the record is persisted

    /** No-arg constructor, required by serialization/ORM frameworks. */
    public NhomKhachHang() {
    }

    /** Full constructor including the storage id. */
    public NhomKhachHang(String tenNhomKh, String maKH, String ghichuNhom, String id) {
        this.tenNhomKh = tenNhomKh;
        this.maKH = maKH;
        this.ghichuNhom = ghichuNhom;
        this.id = id;
    }

    /** Constructor for a not-yet-persisted group ({@code id} stays null). */
    public NhomKhachHang(String tenNhomKh, String maKH, String ghichuNhom) {
        this.tenNhomKh = tenNhomKh;
        this.maKH = maKH;
        this.ghichuNhom = ghichuNhom;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getTenNhomKh() {
        return tenNhomKh;
    }

    public void setTenNhomKh(String tenNhomKh) {
        this.tenNhomKh = tenNhomKh;
    }

    public String getMaKH() {
        return maKH;
    }

    public void setMaKH(String maKH) {
        this.maKH = maKH;
    }

    public String getGhichuNhom() {
        return ghichuNhom;
    }

    public void setGhichuNhom(String ghichuNhom) {
        this.ghichuNhom = ghichuNhom;
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.tika.fork;

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Class loader used inside a forked JVM. Instead of reading class and
 * resource bytes from a local class path, it requests them from the other
 * side of the fork over the {@link #init(DataInputStream, DataOutputStream)}
 * data streams. Each request is: the {@code ForkServer.RESOURCE} marker,
 * this loader's resource id, a request-type byte (1 = single resource,
 * 2 = all matching resources), then the UTF-encoded name.
 */
class ClassLoaderProxy extends ClassLoader implements ForkProxy {

    /**
     * Serial version UID
     */
    private static final long serialVersionUID = -7303109260448540420L;

    /**
     * Names of resources that could not be found. Used to avoid repeated
     * lookup of commonly accessed, but often not present, resources like
     * <code>META-INF/services/javax.xml.parsers.SAXParserFactory</code>.
     */
    private final Set<String> notFound = new HashSet<>();

    /** Identifier of this class loader's resource on the serving side. */
    private final int resource;

    /** Stream for responses from the serving side; set by {@link #init}. */
    private transient DataInputStream input;

    /** Stream for requests to the serving side; set by {@link #init}. */
    private transient DataOutputStream output;

    public ClassLoaderProxy(int resource) {
        this.resource = resource;
    }

    /** Attaches the fork's data streams; must be called before any lookup. */
    public void init(DataInputStream input, DataOutputStream output) {
        this.input = input;
        this.output = output;
    }

    /**
     * Looks up a single resource over the fork streams. Misses are cached
     * in {@link #notFound} so repeated lookups skip the round-trip.
     * Synchronized because the two streams carry one request at a time.
     */
    @Override
    protected synchronized URL findResource(String name) {
        if (notFound.contains(name)) {
            // Known miss; avoid another round-trip
            return null;
        }
        try {
            // Send a request to load the resource data
            output.write(ForkServer.RESOURCE);
            output.write(resource);
            output.write(1); // request type 1: single resource
            output.writeUTF(name);
            output.flush();

            // Receive the response: boolean "found" flag, then the bytes
            if (input.readBoolean()) {
                // Expose the received bytes through an in-memory URL
                return MemoryURLStreamHandler.createURL(readStream());
            } else {
                notFound.add(name);
                return null;
            }
        } catch (IOException e) {
            // A broken stream is treated the same as "resource not found"
            return null;
        }
    }

    /**
     * Looks up all resources matching the given name. The response is a
     * sequence of (true, stream) pairs terminated by a false boolean.
     */
    @Override
    protected synchronized Enumeration<URL> findResources(String name) throws IOException {
        // Send a request to load the resources
        output.write(ForkServer.RESOURCE);
        output.write(resource);
        output.write(2); // request type 2: all matching resources
        output.writeUTF(name);
        output.flush();

        // Receive the response
        List<URL> resources = new ArrayList<>();
        while (input.readBoolean()) {
            resources.add(MemoryURLStreamHandler.createURL(readStream()));
        }
        return Collections.enumeration(resources);
    }

    /**
     * Loads class bytes by requesting the corresponding
     * <code>.class</code> resource over the fork streams and defining the
     * class (and, if needed, its package) from the received bytes.
     */
    @Override
    protected synchronized Class<?> findClass(String name) throws ClassNotFoundException {
        try {
            // Send a request to load the class data
            output.write(ForkServer.RESOURCE);
            output.write(resource);
            output.write(1);
            output.writeUTF(name.replace('.', '/') + ".class");
            output.flush();

            // Receive the response
            if (input.readBoolean()) {
                byte[] data = readStream();
                Class<?> clazz = defineClass(name, data, 0, data.length);
                definePackageIfNecessary(name, clazz);
                return clazz;
            } else {
                throw new ClassNotFoundException("Unable to find class " + name);
            }
        } catch (IOException e) {
            throw new ClassNotFoundException("Unable to load class " + name, e);
        }
    }

    /**
     * Defines the package of the given class if it has not been defined in
     * this loader yet; all package attributes are left unset (null).
     */
    private void definePackageIfNecessary(String className, Class<?> clazz) {
        String packageName = toPackageName(className);
        if (packageName != null && getPackage(packageName) == null) {
            definePackage(packageName, null, null, null, null, null, null, null);
        }
    }

    /**
     * Returns the package part of a fully qualified class name, or null
     * for a class in the default package.
     */
    private String toPackageName(String className) {
        int packageEndIndex = className.lastIndexOf('.');
        if (packageEndIndex > 0) {
            return className.substring(0, packageEndIndex);
        }
        return null;
    }

    /**
     * Reads one length-prefixed byte stream from the fork: a sequence of
     * chunks, each preceded by its length as an unsigned short, terminated
     * by a zero length.
     */
    private byte[] readStream() throws IOException {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        byte[] buffer = new byte[0xffff]; // max chunk size an unsigned short can describe
        int n;
        while ((n = input.readUnsignedShort()) > 0) {
            input.readFully(buffer, 0, n);
            stream.write(buffer, 0, n);
        }
        return stream.toByteArray();
    }
}
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

//----------------------------------------------------
// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST.
//----------------------------------------------------

package co.elastic.clients.elasticsearch.snapshot;

import co.elastic.clients.base.ElasticsearchError;
import co.elastic.clients.base.Endpoint;
import co.elastic.clients.elasticsearch._types.RequestBase;
import co.elastic.clients.json.JsonpDeserializer;
import co.elastic.clients.json.ObjectBuilderDeserializer;
import co.elastic.clients.json.ObjectDeserializer;
import co.elastic.clients.util.ObjectBuilder;
import jakarta.json.JsonValue;
import jakarta.json.stream.JsonGenerator;
import java.lang.String;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import javax.annotation.Nullable;

// typedef: snapshot.delete.Request

/**
 * Request type for the {@code snapshot.delete} API
 * ({@code DELETE /_snapshot/{repository}/{snapshot}}). Both path parts,
 * {@code repository} and {@code snapshot}, are mandatory (enforced in the
 * constructor); {@code master_timeout} is an optional query parameter.
 */
public final class DeleteRequest extends RequestBase {
	private final String repository;

	private final String snapshot;

	@Nullable
	private final JsonValue masterTimeout;

	// ---------------------------------------------------------------------------------------------

	protected DeleteRequest(Builder builder) {

		// Required fields fail fast with an NPE naming the missing property
		this.repository = Objects.requireNonNull(builder.repository, "repository");
		this.snapshot = Objects.requireNonNull(builder.snapshot, "snapshot");
		this.masterTimeout = builder.masterTimeout;

	}

	/**
	 * API name: {@code repository}
	 */
	public String repository() {
		return this.repository;
	}

	/**
	 * API name: {@code snapshot}
	 */
	public String snapshot() {
		return this.snapshot;
	}

	/**
	 * API name: {@code master_timeout}
	 */
	@Nullable
	public JsonValue masterTimeout() {
		return this.masterTimeout;
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link DeleteRequest}.
	 */
	public static class Builder implements ObjectBuilder<DeleteRequest> {
		private String repository;

		private String snapshot;

		@Nullable
		private JsonValue masterTimeout;

		/**
		 * API name: {@code repository}
		 */
		public Builder repository(String value) {
			this.repository = value;
			return this;
		}

		/**
		 * API name: {@code snapshot}
		 */
		public Builder snapshot(String value) {
			this.snapshot = value;
			return this;
		}

		/**
		 * API name: {@code master_timeout}
		 */
		public Builder masterTimeout(@Nullable JsonValue value) {
			this.masterTimeout = value;
			return this;
		}

		/**
		 * Builds a {@link DeleteRequest}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public DeleteRequest build() {
			return new DeleteRequest(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Endpoint "{@code snapshot.delete}".
	 */
	public static final Endpoint<DeleteRequest, DeleteResponse, ElasticsearchError> ENDPOINT = new Endpoint.Simple<>(
			// Request method
			request -> {
				return "DELETE";

			},

			// Request path
			request -> {
				// Bit flags recording which path properties are set
				final int _repository = 1 << 0;
				final int _snapshot = 1 << 1;

				int propsSet = 0;

				if (request.repository() != null)
					propsSet |= _repository;
				if (request.snapshot() != null)
					propsSet |= _snapshot;

				// The only path template requires both parts:
				// /_snapshot/{repository}/{snapshot}
				if (propsSet == (_repository | _snapshot)) {
					StringBuilder buf = new StringBuilder();
					buf.append("/_snapshot");
					buf.append("/");
					buf.append(request.repository);
					buf.append("/");
					buf.append(request.snapshot);
					return buf.toString();
				}
				throw Endpoint.Simple.noPathTemplateFound("path");

			},

			// Request parameters (query string)
			request -> {
				Map<String, String> params = new HashMap<>();
				if (request.masterTimeout != null) {
					params.put("master_timeout", request.masterTimeout.toString());
				}
				return params;

			}, Endpoint.Simple.emptyMap(), false, DeleteResponse.DESERIALIZER);

}
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.drl.ast.dsl.impl; import org.drools.drl.ast.dsl.AccumulateImportDescrBuilder; import org.drools.drl.ast.dsl.PackageDescrBuilder; import org.drools.drl.ast.descr.AccumulateImportDescr; public class AccumulateImportDescrBuilderImpl extends BaseDescrBuilderImpl<PackageDescrBuilder, AccumulateImportDescr> implements AccumulateImportDescrBuilder { protected AccumulateImportDescrBuilderImpl(PackageDescrBuilder parent) { super( parent, new AccumulateImportDescr() ); } public AccumulateImportDescrBuilder target( String target ) { descr.setTarget( target ); return this; } public AccumulateImportDescrBuilder functionName(String functionName) { descr.setFunctionName( functionName ); return this; } }
package com.app.barber.util.weekengine;

import android.app.Activity;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;

import com.app.barber.R;
import com.app.barber.models.TimeSlotsModel;
import com.app.barber.models.response.BlockedDatesResponse;
import com.app.barber.models.response.FutureAppointmentStatusModel;
import com.app.barber.util.GlobalValues;
import com.app.barber.util.iface.OnItemClickListener;
import com.app.barber.views.CustomTextView;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;

import butterknife.BindView;
import butterknife.ButterKnife;
import butterknife.OnClick;

/**
 * RecyclerView adapter showing a horizontal strip of week days. Each item
 * is a {@link ModelDay} (weekday abbreviation + day-of-month + full
 * "MM/dd/yyyy" date). Supports single and multi selection, paging to the
 * previous/next week, and per-day status flags (appointment available /
 * blocked hours) rendered as an indicator icon.
 */
public class WeekViewAdapter extends RecyclerView.Adapter<RecyclerView.ViewHolder> {
    private List<ModelDay> specList;        // backing list of days currently shown
    OnItemClickListener listener;           // click callback to the hosting screen
    Activity specialiseActivity;            // used only for resource lookups
    private boolean isClicable = true;      // when false, taps on dates are ignored
    private boolean isMultiselection;//is multiselection enabled

    public WeekViewAdapter(Activity specialiseActivity, List<ModelDay> feedsList, OnItemClickListener listener) {
        this.specList = feedsList;
        this.listener = listener;
        this.specialiseActivity = specialiseActivity;
    }

    @Override
    public SlotsViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        View itemView = LayoutInflater.from(parent.getContext())
                .inflate(R.layout.view_week_days_adapter, parent, false);
        return new SlotsViewHolder(itemView);
    }

    /**
     * Binds one day cell: date/day text, selected styling (white-on-blue
     * circle vs. grey-on-white), and the status indicator.
     */
    @Override
    public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
        ModelDay positionData = specList.get(position);
        ((SlotsViewHolder) holder).weekDate.setText("" + positionData.getDate());
        ((SlotsViewHolder) holder).weekDay.setText("" + positionData.getDay());
        if (positionData.isSelected()) {
            ((SlotsViewHolder) holder).weekDate.setText("" + positionData.getDate());
            ((SlotsViewHolder) holder).weekDate.setTextColor(specialiseActivity.getResources().getColor(R.color.color_white));
            ((SlotsViewHolder) holder).weekDate.setBackgroundResource(R.drawable.circular_blue_background);
        } else {
            ((SlotsViewHolder) holder).weekDate.setText("" + positionData.getDate());
            ((SlotsViewHolder) holder).weekDate.setTextColor(specialiseActivity.getResources().getColor(R.color.color_grey));
            ((SlotsViewHolder) holder).weekDate.setBackgroundResource(R.color.color_white);
        }
        if (positionData.isApointmentAvailable()) {
            ((SlotsViewHolder) holder).appointStatus.setVisibility(View.VISIBLE);
        } else
            ((SlotsViewHolder) holder).appointStatus.setVisibility(View.GONE);
        // NOTE(review): this second check overwrites the visibility set just
        // above, so blockedHours alone decides the indicator in the end —
        // confirm whether both flags were meant to share one icon.
        if (positionData.isBlockedHours()) {
            ((SlotsViewHolder) holder).appointStatus.setVisibility(View.VISIBLE);
        } else
            ((SlotsViewHolder) holder).appointStatus.setVisibility(View.GONE);
    }

    @Override
    public int getItemCount() {
        return specList.size();
    }

    /** Replaces the whole day list and refreshes the view. */
    public void updateAll(List<ModelDay> posts) {
        this.specList.clear();
        this.specList.addAll(posts);
        notifyDataSetChanged();
    }

    /** Prepends a single day and refreshes the view. */
    public void addItem(ModelDay posts) {
        this.specList.add(0, posts);
        notifyDataSetChanged();
    }

    /** Returns the full date of the first item. */
    public String getSelectedDate() {
        return specList.get(0).getFullDate();//default selected date
    }

    /**
     * setCurrent days.
     * Rebuilds the list with {@code iDay} days starting today; the first
     * day (today) is pre-selected.
     * NOTE(review): SimpleDateFormat is created without an explicit Locale,
     * so weekday abbreviations follow the device locale — confirm intended.
     */
    public void setCurrentWeek(int iDay) {
        ArrayList<ModelDay> modelList = new ArrayList<>();
        ModelDay mDay = new ModelDay();
        SimpleDateFormat sdfFull = new SimpleDateFormat("MM/dd/yyyy");
        SimpleDateFormat sdf = new SimpleDateFormat("EE");
        SimpleDateFormat sdf1 = new SimpleDateFormat("dd");
        for (int i = 0; i < iDay; i++) {
            Calendar calendar = new GregorianCalendar();
            calendar.add(Calendar.DATE, i);
            String day = sdf.format(calendar.getTime());
            String date = sdf1.format(calendar.getTime());
            String fullDate = sdfFull.format(calendar.getTime());
            Log.i("TAG", day + " " + date);
            mDay = new ModelDay();
            mDay.setDate(Integer.parseInt(date));
            mDay.setDay(day.substring(0, 2)); // two-letter weekday label
            mDay.setFullDate(fullDate);
            if (i == 0)
                mDay.setSelected(true);
            modelList.add(mDay);
        }
        specList.clear();
        specList.addAll(modelList);
        notifyDataSetChanged();
    }

    /**
     * Get FirstDay
     */
    public String getFirstDay() {
        return specList.get(0).fullDate;
    }

    /**
     * Get LastDay
     */
    public String getEndDay() {
        return specList.get(specList.size() - 1).fullDate;
    }

    /** Disables date taps; existing selection flags are left untouched. */
    public void noDeafultselection() {
        isClicable = false;
        // for (int i = 0; i < specList.size(); i++) {
        // specList.get(i).setSelected(false);
        // }
        notifyDataSetChanged();
    }

    /**
     * Get Previous Week
     * Rebuilds the list with the {@code iDay - 1} days before today
     * (loop starts at 1), newest-first via add(0, ...), and disables taps.
     */
    public void getPastWeek(int iDay) {
        ArrayList<ModelDay> modelList = new ArrayList<>();
        ModelDay mDay = new ModelDay();
        SimpleDateFormat sdfFull = new SimpleDateFormat("MM/dd/yyyy");
        SimpleDateFormat sdf = new SimpleDateFormat("EE");
        SimpleDateFormat sdf1 = new SimpleDateFormat("dd");
        for (int i = 1; i < iDay; i++) {
            Calendar calendar = new GregorianCalendar();
            calendar.add(Calendar.DATE, -i);
            String day = sdf.format(calendar.getTime());
            String date = sdf1.format(calendar.getTime());
            String fullDate = sdfFull.format(calendar.getTime());
            Log.i("TAG", day + " " + date);
            mDay = new ModelDay();
            mDay.setDate(Integer.parseInt(date));
            mDay.setDay(day.substring(0, 2));
            mDay.setFullDate(fullDate);
            // if (i == 0)
            // mDay.setSelected(true);
            modelList.add(0, mDay);
        }
        //
        specList.clear();
        specList.addAll(0, modelList);
        noDeafultselection();
        notifyDataSetChanged();
    }

    /**
     * Get next week
     * Rebuilds the list with {@code iDay} days following the current last
     * visible day; the first new day is marked selected (then taps are
     * disabled by noDeafultselection()).
     */
    public void getNextWeek(int iDay) {
        Log.i("getNextWeek ", " " + specList.get(specList.size() - 1).getFullDate());
        SimpleDateFormat f = new SimpleDateFormat("MM/dd/yyyy");
        Calendar calendar = Calendar.getInstance();
        try {
            // Anchor the calendar on the last currently-shown date
            Date d = f.parse(specList.get(specList.size() - 1).getFullDate());
            long milliseconds = d.getTime();
            calendar.setTimeInMillis(milliseconds);
            // calendar.add(Calendar.DATE, iDay);
        } catch (ParseException e) {
            // On a parse failure the calendar stays at "now" (best effort)
            e.printStackTrace();
        }
        ArrayList<ModelDay> modelList = new ArrayList<>();
        ModelDay mDay = new ModelDay();
        SimpleDateFormat sdfFull = new SimpleDateFormat("MM/dd/yyyy");
        SimpleDateFormat sdf = new SimpleDateFormat("EE");
        SimpleDateFormat sdf1 = new SimpleDateFormat("dd");
        for (int i = 0; i < iDay; i++) {
            Log.i("TAG", " --------------------------- " + i);
            calendar.add(Calendar.DATE, 1);
            String day = sdf.format(calendar.getTime());
            String date = sdf1.format(calendar.getTime());
            String fullDate = sdfFull.format(calendar.getTime());
            Log.i("TAG", day + " " + date);
            mDay = new ModelDay();
            mDay.setDate(Integer.parseInt(date));
            mDay.setDay(day.substring(0, 2));
            mDay.setFullDate(fullDate);
            if (i == 0)
                mDay.setSelected(true);
            modelList.add(mDay);
        }
        specList.clear();
        specList.addAll(modelList);
        noDeafultselection();
        notifyDataSetChanged();
    }

    /**
     * Get Particular data.
     * Returns the full date of the item at the given position.
     */
    public String getpostionData(int firstVisibleItem) {
        return specList.get(firstVisibleItem).getFullDate();
    }

    /**
     * update status for future appointments.
     * Matches each shown day against the response by full date and sets
     * its appointment-available flag.
     * NOTE(review): notifyDataSetChanged() is also called inside the loop
     * on every match — the trailing call alone would suffice.
     */
    public void notifyDateStatus(List<FutureAppointmentStatusModel.ReponseBean> reponse) {
        for (int i = 0; i < specList.size(); i++) {
            for (int j = 0; j < reponse.size(); j++) {
                if (this.specList.get(i).getFullDate().equals(reponse.get(j).getDate())) {
                    // Log.e("loop ", " notifyDateStatus " + specList.get(i).getFullDate() + " " + reponse.get(j).getDate());
                    this.specList.get(i).setApointmentAvailable(reponse.get(j).isStatus());
                    notifyDataSetChanged();
                }
            }
        }
        notifyDataSetChanged();
    }

    /**
     * update status for future blockhours.
     * Same matching as above, but sets the blocked-hours flag. The unused
     * {@code oj} parameter only disambiguates the overload.
     */
    public void notifyDateStatus(List<BlockedDatesResponse.ListBean> reponse, Object oj) {
        for (int i = 0; i < specList.size(); i++) {
            for (int j = 0; j < reponse.size(); j++) {
                if (this.specList.get(i).getFullDate().equals(reponse.get(j).getDate())) {
                    // Log.e("loop ", " notifyDateStatus " + specList.get(i).getFullDate() + " " + reponse.get(j).getDate());
                    this.specList.get(i).setBlockedHours(reponse.get(j).isIsBlockHourExist());
                    notifyDataSetChanged();
                }
            }
        }
        notifyDataSetChanged();
    }

    /** Clears the appointment flag of the item at {@code position}. */
    public void updateAppointmentStatus(int position) {
        for (int i = 0; i < specList.size(); i++) {
            if (i == position) {
                specList.get(i).setApointmentAvailable(false);
            }
        }
        notifyDataSetChanged();
    }

    /**
     * Set particular position selected
     * Toggles the selection flag of one item (multi-selection mode).
     */
    public void setselected(int position) {
        if (specList.get(position).isSelected()) {
            this.specList.get(position).setSelected(false);
        } else {
            this.specList.get(position).setSelected(true);
        }
        notifyItemChanged(position);
    }

    /**
     * Allow multi selection
     */
    public void allowMultipleSelection() {
        this.isMultiselection = true;
        notifyDataSetChanged();
    }

    /**
     * Get selectedItems List;
     * Returns the full dates of all currently selected items.
     */
    public ArrayList<String> getAllSelectedDates() {//Get comma selcted values string array.
        ArrayList<String> selectedDates = new ArrayList<>();
        for (int i = 0; i < specList.size(); i++) {
            if (specList.get(i).isSelected()) {
                selectedDates.add(specList.get(i).getFullDate());
            }
        }
        return selectedDates;
    }

    /**
     * Get selected items string comma separated.
     * Returns e.g. "01/02/2020,01/03/2020" (trailing comma stripped), or
     * null when an unexpected exception occurred.
     */
    public String getAllSelectedDate() {//Get comma saperated values
        String selectedType = null;
        try {
            StringBuilder builder = new StringBuilder();
            for (int i = 0; i < specList.size(); i++) {
                if (specList.get(i).isSelected()) {
                    builder.append(specList.get(i).getFullDate() + ",");
                }
            }
            selectedType = builder.toString();
            if (selectedType != null && selectedType.length() > 0 && selectedType.charAt(selectedType.length() - 1) == ',') {
                selectedType = selectedType.substring(0, selectedType.length() - 1);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return selectedType;
    }

    /**
     * Set selected date.
     * Makes the item with this full date the single selection.
     */
    public void setselectedDate(String date) {
        for (int i = 0; i < specList.size(); i++) {
            if (date.equals(specList.get(i).getFullDate())) {
                specList.get(i).setSelected(true);
            } else {
                specList.get(i).setSelected(false);
            }
        }
        notifyDataSetChanged();
    }

    /** Holder for one day cell; routes the date tap back through the adapter. */
    public class SlotsViewHolder extends RecyclerView.ViewHolder {
        @BindView(R.id.week_day)
        CustomTextView weekDay;
        @BindView(R.id.week_date)
        CustomTextView weekDate;
        @BindView(R.id.appoint_status)
        ImageView appointStatus;

        public SlotsViewHolder(View view) {
            super(view);
            ButterKnife.bind(this, view);
        }

        @OnClick({R.id.week_date})
        public void onLCick(View v) {
            switch (v.getId()) {
                case R.id.week_date:
                    if (isClicable) {
                        if (isMultiselection) {
                            // Multi-select: toggle this cell; payload is null
                            listener.onItemClick(v, getAdapterPosition(), GlobalValues.ClickOperations.DATE_CLICKED, null);
                            setselected(getAdapterPosition());
                        } else {
                            // Single-select: pass the day model and make it the only selection
                            listener.onItemClick(v, getAdapterPosition(), GlobalValues.ClickOperations.DATE_CLICKED, specList.get(getAdapterPosition()));
                            updateSelection(getAdapterPosition());
                        }
                    }
                    break;
            }
        }
    }

    /** Single-selection: selects {@code adapterPosition}, clears the rest. */
    private void updateSelection(int adapterPosition) {
        for (int i = 0; i < specList.size(); i++) {
            if (i == adapterPosition)
                specList.get(i).setSelected(true);
            else
                specList.get(i).setSelected(false);
        }
        notifyDataSetChanged();
    }

    // NOTE(review): no-op; slot data is not consumed here — confirm whether
    // this hook is still needed.
    public void setData(TimeSlotsModel slotData) {
        // User Detail
    }

    // NOTE(review): empty placeholder, never referenced in this class.
    private void toggleRefreshing(boolean b) {
    }
}
package hex; import hex.genmodel.utils.DistributionFamily; import jsr166y.CountedCompleter; import water.*; import water.api.FSIOException; import water.api.HDFSIOException; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OModelBuilderIllegalArgumentException; import water.fvec.*; import water.rapids.ast.prims.advmath.AstKFold; import water.udf.CFuncRef; import water.util.*; import java.io.IOException; import java.lang.reflect.Method; import java.util.*; /** * Model builder parent class. Contains the common interfaces and fields across all model builders. */ abstract public class ModelBuilder<M extends Model<M,P,O>, P extends Model.Parameters, O extends Model.Output> extends Iced { public ToEigenVec getToEigenVec() { return null; } public boolean shouldReorder(Vec v) { return _parms._categorical_encoding.needsResponse() && isSupervised(); } transient private IcedHashMap<Key,String> _toDelete = new IcedHashMap<>(); void cleanUp() { FrameUtils.cleanUp(_toDelete); } public Job<M> _job; // Job controlling this build /** Block till completion, and return the built model from the DKV. Note the * funny assert: the Job does NOT have to be controlling this model build, * but might, e.g. be controlling a Grid search for which this is just one * of many results. Calling 'get' means that we are blocking on the Job * which is controlling ONLY this ModelBuilder, and when the Job completes * we can return built Model. */ public final M get() { assert _job._result == _result; return _job.get(); } public final boolean isStopped() { return _job.isStopped(); } // Key of the model being built; note that this is DIFFERENT from // _job._result if the Job is being shared by many sub-models // e.g. cross-validation. 
protected Key<M> _result;  // Built Model key
  public final Key<M> dest() { return _result; }

  private long _start_time; //start time in msecs - only used for time-based stopping
  protected boolean timeout() {
    assert(_start_time > 0) : "Must set _start_time for each individual model.";
    // _max_runtime_secs <= 0 disables the time-based stop.
    return _parms._max_runtime_secs > 0 && System.currentTimeMillis() - _start_time > (long) (_parms._max_runtime_secs * 1e3);
  }
  protected boolean stop_requested() {
    // Stop either on explicit user/job request or when the time budget ran out.
    return _job.stop_requested() || timeout();
  }

  /** Default model-builder key */
  public static <S extends Model> Key<S> defaultKey(String algoName) {
    return Key.make(H2O.calcNextUniqueModelId(algoName));
  }

  /** Default easy constructor: Unique new job and unique new result key */
  protected ModelBuilder(P parms) {
    this(parms, ModelBuilder.<M>defaultKey(parms.algoName()));
  }

  /** Unique new job and named result key */
  protected ModelBuilder(P parms, Key<M> key) {
    _job = new Job<>(_result = key, parms.javaName(), parms.algoName());
    _parms = parms;
  }

  /** Shared pre-existing Job and unique new result key */
  protected ModelBuilder(P parms, Job<M> job) {
    _job = job;
    _result = defaultKey(parms.algoName());
    _parms = parms;
  }

  /** List of known ModelBuilders with all default args; endlessly cloned by
   *  the GUI for new private instances, then the GUI overrides some of the
   *  defaults with user args. */
  private static String[] ALGOBASES = new String[0];
  public static String[] algos() { return ALGOBASES; }
  private static String[] SCHEMAS = new String[0];
  private static ModelBuilder[] BUILDERS = new ModelBuilder[0];

  /** One-time start-up only ModelBuilder, endlessly cloned by the GUI for the
   *  default settings. */
  protected ModelBuilder(P parms, boolean startup_once) { this(parms,startup_once,"hex.schemas."); }
  protected ModelBuilder(P parms, boolean startup_once, String externalSchemaDirectory ) {
    // Registration key is the lower-cased simple class name, e.g. "gbm".
    String base = getClass().getSimpleName().toLowerCase();
    if (!startup_once)
      throw H2O.fail("Algorithm " + base + " registration issue. It can only be called at startup.");
    _job = null;
    _result = null;
    _parms = parms;
    init(false); // Default cheap init
    if( ArrayUtils.find(ALGOBASES,base) != -1 )
      throw H2O.fail("Only called once at startup per ModelBuilder, and "+base+" has already been called");
    // FIXME: this is not thread safe!
    // michalk: this note ^^ is generally true (considering 3rd parties), however, in h2o-3 code base we have a sequential ModelBuilder initialization
    // The three parallel arrays below are grown in lock-step; index i in each
    // refers to the same registered algorithm.
    ALGOBASES = Arrays.copyOf(ALGOBASES,ALGOBASES.length+1);
    BUILDERS  = Arrays.copyOf(BUILDERS ,BUILDERS .length+1);
    SCHEMAS   = Arrays.copyOf(SCHEMAS  ,SCHEMAS  .length+1);
    ALGOBASES[ALGOBASES.length-1] = base;
    BUILDERS [BUILDERS .length-1] = this;
    SCHEMAS  [SCHEMAS  .length-1] = externalSchemaDirectory;
  }

  /** gbm -&gt; GBM, deeplearning -&gt; DeepLearning */
  public static String algoName(String urlName) { return BUILDERS[ArrayUtils.find(ALGOBASES,urlName)]._parms.algoName(); }
  /** gbm -&gt; hex.tree.gbm.GBM, deeplearning -&gt; hex.deeplearning.DeepLearning */
  public static String javaName(String urlName) { return BUILDERS[ArrayUtils.find(ALGOBASES,urlName)]._parms.javaName(); }
  /** gbm -&gt; GBMParameters */
  public static String paramName(String urlName) { return algoName(urlName)+"Parameters"; }
  /** gbm -> "hex.schemas." ; custAlgo -> "org.myOrg.schemas."
*/ public static String schemaDirectory(String urlName) { return SCHEMAS[ArrayUtils.find(ALGOBASES,urlName)]; } /** * * @param urlName url name of the algo, for example gbm for Gradient Boosting Machine * @return true, if model supports exporting to POJO */ public static boolean havePojo(String urlName) { return BUILDERS[ensureBuilderIndex(urlName)].havePojo(); } /** * * @param urlName url name of the algo, for example gbm for Gradient Boosting Machine * @return true, if model supports exporting to MOJO */ public static boolean haveMojo(String urlName) { return BUILDERS[ensureBuilderIndex(urlName)].haveMojo(); } /** * Returns <strong>valid</strong> index of given url name in {@link #ALGOBASES} or throws an exception. * @param urlName url name to return the index for * @return valid index, if url name is not present in {@link #ALGOBASES} throws an exception */ private static int ensureBuilderIndex(String urlName) { final String formattedName = urlName.toLowerCase(); int index = ArrayUtils.find(ALGOBASES, formattedName); if (index < 0) { throw new IllegalArgumentException(String.format("Cannot find Builder for algo url name %s", formattedName)); } return index; } /** Factory method to create a ModelBuilder instance for given the algo name. * Shallow clone of both the default ModelBuilder instance and a Parameter. */ public static <B extends ModelBuilder> B make(String algo, Job job, Key<Model> result) { int idx = ArrayUtils.find(ALGOBASES,algo.toLowerCase()); if (idx < 0) { StringBuilder sb = new StringBuilder(); sb.append("Unknown algo: '").append(algo).append("'; Extension report: "); Log.err(ExtensionManager.getInstance().makeExtensionReport(sb)); throw new IllegalStateException("Algorithm '" + algo + "' is not registered. Available algos: [" + StringUtils.join(",", ALGOBASES) + "]"); } B mb = (B)BUILDERS[idx].clone(); mb._job = job; mb._result = result; mb._parms = BUILDERS[idx]._parms.clone(); return mb; } /** All the parameters required to build the model. 
*/
  public P _parms;              // Not final, so CV can set-after-clone

  /** Training frame: derived from the parameter's training frame, excluding
   *  all ignored columns, all constant and bad columns, perhaps flipping the
   *  response column to an Categorical, etc.  */
  public final Frame train() { return _train; }
  protected transient Frame _train;
  public void setTrain(Frame train) {
    _train = train;
  }

  /** Validation frame: derived from the parameter's validation frame, excluding
   *  all ignored columns, all constant and bad columns, perhaps flipping the
   *  response column to a Categorical, etc.  Is null if no validation key is set.  */
  protected final Frame valid() { return _valid; }
  protected transient Frame _valid;

  // TODO: tighten up the type
  // Map the algo name (e.g., "deeplearning") to the builder class (e.g., DeepLearning.class) :
  private static final Map<String, Class<? extends ModelBuilder>> _builders = new HashMap<>();

  // Map the Model class (e.g., DeepLearningModel.class) to the algo name (e.g., "deeplearning"):
  private static final Map<Class<? extends Model>, String> _model_class_to_algo = new HashMap<>();

  // Map the simple algo name (e.g., deeplearning) to the full algo name (e.g., "Deep Learning"):
  private static final Map<String, String> _algo_to_algo_full_name = new HashMap<>();

  // Map the algo name (e.g., "deeplearning") to the Model class (e.g., DeepLearningModel.class):
  private static final Map<String, Class<? extends Model>> _algo_to_model_class = new HashMap<>();

  /** Train response vector. */
  public Vec response(){return _response;}
  /** Validation response vector (falls back to the training response when no
   *  separate validation response exists). */
  public Vec vresponse(){return _vresponse == null ?
_response : _vresponse;}

  /** Base class for the algorithm's worker task: wraps computeImpl() with the
   *  common Scope / frame-locking / completion boilerplate. */
  abstract protected class Driver extends H2O.H2OCountedCompleter<Driver> {
    protected Driver(){ super(); }
    protected Driver(H2O.H2OCountedCompleter completer){ super(completer); }

    // Pull the boilerplate out of the computeImpl(), so the algo writer doesn't need to worry about the following:
    // 1) Scope (unless they want to keep data, then they must call Scope.untrack(Key<Vec>[]))
    // 2) Train/Valid frame locking and unlocking
    // 3) calling tryComplete()
    public void compute2() {
      try {
        Scope.enter();
        _parms.read_lock_frames(_job); // Fetch & read-lock input frames
        computeImpl();
        saveModelCheckpointIfConfigured();
      } finally {
        // Unlock/cleanup must run even when computeImpl() throws.
        setFinalState();
        _parms.read_unlock_frames(_job);
        if (!_parms._is_cv_model) cleanUp(); //cv calls cleanUp on its own terms
        Scope.exit();
      }
      tryComplete();
    }

    // Algorithm-specific model build, implemented by each algo's Driver.
    public abstract void computeImpl();
  }

  // Attach the controlling Job to the built model's output and stop its clock.
  // No-op when the result key or model is absent (e.g. failed build).
  private void setFinalState() {
    Key<M> reskey = dest();
    if (reskey == null) return;
    M res = reskey.get();
    if (res != null && res._output != null) {
      res._output._job = _job;
      res._output.stopClock();
    }
  }

  // Export a binary copy of the freshly built model when the user configured
  // an export_checkpoints_dir; I/O failures surface as an argument error.
  private void saveModelCheckpointIfConfigured() {
    Model model = _result.get();
    if (model != null && !StringUtils.isNullOrEmpty(model._parms._export_checkpoints_dir)) {
      try {
        model.exportBinaryModel(model._parms._export_checkpoints_dir + "/" + model._key.toString(), true);
      } catch (FSIOException | HDFSIOException | IOException e) {
        throw new H2OIllegalArgumentException("export_checkpoints_dir", "saveModelIfConfigured", e);
      }
    }
  }

  /**
   * Start model training using a this ModelBuilder as a template. The MB can be either used directly
   * or if the method was invoked on a regular H2O node. If the method was called on a client node, the model builder
   * will be used as a template only and the actual instance used for training will re-created on a remote H2O node.
   *
   * Warning: the nature of this method prohibits further use of this instance of the model builder after the method
   * is called.
*
   * This is intended to reduce training time in client-mode setups, it pushes all computation to a regular H2O node
   * and avoid exchanging data between client and H2O cluster. This also lowers requirements on the H2O client node.
   *
   * @return model job
   */
  public Job<M> trainModelOnH2ONode() {
    // Fail fast if parameter validation already recorded errors.
    if (error_count() > 0)
      throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
    TrainModelRunnable trainModel = new TrainModelRunnable(this);
    // Runs setupOnRemote()/run() on a regular (non-client) H2O node.
    H2O.runOnH2ONode(trainModel);
    return _job;
  }

  /** Serializable wrapper that re-creates the builder on the remote node
   *  (only the Job, result key and parameters travel over the wire). */
  private static class TrainModelRunnable extends H2O.RemoteRunnable<TrainModelRunnable> {
    private transient ModelBuilder _mb;
    private Job<Model> _job;
    private Key<Model> _key;
    private Model.Parameters _parms;
    @SuppressWarnings("unchecked")
    private TrainModelRunnable(ModelBuilder mb) {
      _mb = mb;
      _job = (Job<Model>) _mb._job;
      _key = _job._result;
      _parms = _mb._parms;
    }
    @Override
    public void setupOnRemote() {
      // Rebuild a fresh ModelBuilder from the registered template on the
      // remote node; the original (transient) _mb did not travel with us.
      _mb = ModelBuilder.make(_parms.algoName(), _job, _key);
      _mb._parms = _parms;
      _mb.init(false); // validate parameters
    }
    @Override
    public void run() {
      _mb.trainModel();
    }
  }

  /** Method to launch training of a Model, based on its parameters.
*/
  final public Job<M> trainModel() {
    if (error_count() > 0)
      throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
    _start_time = System.currentTimeMillis();
    // Simple (non-CV) case: start the algo's Driver directly on the Job.
    if( !nFoldCV() )
      return _job.start(trainModelImpl(), _parms.progressUnits(), _parms._max_runtime_secs);

    // cross-validation needs to be forked off to allow continuous (non-blocking) progress bar
    return _job.start(new H2O.H2OCountedCompleter() {
                        @Override
                        public void compute2() {
                          computeCrossValidation();
                          tryComplete();
                        }
                        @Override
                        public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
                          Log.warn("Model training job "+_job._description+" completed with exception: "+ex);
                          if (_job._result != null) {
                            try {
                              _job._result.remove(); //ensure there's no incomplete model left for manipulation after crash or cancellation
                            } catch (Exception logged) {
                              Log.warn("Exception thrown when removing result from job "+ _job._description, logged);
                            }
                          }
                          return true;
                        }
                      },
        // Progress budget: N CV folds plus the main model.
        (nFoldWork()+1/*main model*/) * _parms.progressUnits(), _parms._max_runtime_secs);
  }

  /**
   * Train a model as part of a larger Job;
   *
   * @param fr: Input frame override, ignored if null.
   *   In some cases, algos do not work directly with the original frame in the K/V store.
   *   Instead they run on a private anonymous copy (eg: reblanced dataset).
   *   Use this argument if you want nested job to work on the actual working copy rather than the original Frame in the K/V.
   *   Example: Outer job rebalances dataset and then calls nested job. To avoid needless second reblance, pass in the (already rebalanced) working copy.
   * */
  final public M trainModelNested(Frame fr) {
    if(fr != null) // Use the working copy (e.g. rebalanced) instead of the original K/V store version
      setTrain(fr);
    if (error_count() > 0)
      throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
    _start_time = System.currentTimeMillis();
    // Runs synchronously (blocking) unlike trainModel().
    if( !nFoldCV() )
      trainModelImpl().compute2();
    else
      computeCrossValidation();
    return _result.get();
  }

  /**
   * Train a model as part of a larger job. The model will be built on a non-client node.
   *
   * @param job containing job
   * @param result key of the resulting model
   * @param params model parameters
   * @param fr input frame, ignored if null
   * @param <MP> Model.Parameters
   * @return instance of a Model
   */
  public static <MP extends Model.Parameters> Model trainModelNested(Job<?> job, Key<Model> result, MP params, Frame fr) {
    H2O.runOnH2ONode(new TrainModelNestedRunnable(job, result, params, fr));
    return result.get();
  }

  /** Remote wrapper: rebuilds the ModelBuilder on a non-client node and runs
   *  the blocking nested training there. */
  private static class TrainModelNestedRunnable extends H2O.RemoteRunnable<TrainModelNestedRunnable> {
    private Job<?> _job;
    private Key<Model> _key;
    private Model.Parameters _parms;
    private Frame _fr;
    private TrainModelNestedRunnable(Job<?> job, Key<Model> key, Model.Parameters parms, Frame fr) {
      _job = job;
      _key = key;
      _parms = parms;
      _fr = fr;
    }
    @Override
    public void run() {
      ModelBuilder mb = ModelBuilder.make(_parms.algoName(), _job, _key);
      mb._parms = _parms;
      mb.trainModelNested(_fr);
    }
  }

  /** Model-specific implementation of model training
   * @return A F/J Job, which, when executed, does the build.  F/J is NOT started.  */
  abstract protected Driver trainModelImpl();

  /**
   * How many should be trained in parallel during N-fold cross-validation?
   * Train all CV models in parallel when parallelism is enabled, otherwise train one at a time
   * Each model can override this logic, based on parameters, dataset size, etc.
*
   * @return How many models to train in parallel during cross-validation
   */
  protected int nModelsInParallel() {
    if (!_parms._parallelize_cross_validation || _parms._max_runtime_secs != 0)
      return 1; //user demands serial building (or we need to honor the time constraints for all CV models equally)
    if (_train.byteSize() < 1e6) return _parms._nfolds; //for small data, parallelize over CV models
    return 1; //safe fallback
  }

  // Work for each requested fold: either the explicit nfolds parameter or the
  // number of distinct levels in the user-supplied fold column.
  protected int nFoldWork() {
    if( _parms._fold_column == null ) return _parms._nfolds;
    Vec f = _parms._train.get().vec(_parms._fold_column);
    Vec fc = VecUtils.toCategoricalVec(f);
    int N = fc.domain().length;
    fc.remove();   // drop the temporary categorical copy
    return N;
  }

  /**
   * Default naive (serial) implementation of N-fold cross-validation
   * (builds N+1 models, all have train+validation metrics, the main model has N-fold cross-validated validation metrics)
   */
  public void computeCrossValidation() {
    assert _job.isRunning();    // main Job is still running
    _job.setReadyForView(false); //wait until the main job starts to let the user inspect the main job
    final Integer N = nFoldWork();
    init(false);
    ModelBuilder<M, P, O>[] cvModelBuilders = null;
    try {
      Scope.enter();

      // Step 1: Assign each row to a fold
      final Vec foldAssignment = cv_AssignFold(N);

      // Step 2: Make 2*N binary weight vectors
      final Vec[] weights = cv_makeWeights(N, foldAssignment);

      // Step 3: Build N train & validation frames; build N ModelBuilders; error check them all
      cvModelBuilders = cv_makeFramesAndBuilders(N, weights);

      // Step 4: Run all the CV models
      cv_buildModels(N, cvModelBuilders);

      // Step 5: Score the CV models
      ModelMetrics.MetricBuilder mbs[] = cv_scoreCVModels(N, weights, cvModelBuilders);

      // Step 6: Build the main model
      buildMainModel();

      // Step 7: Combine cross-validation scores; compute main model x-val
      // scores; compute gains/lifts
      cv_mainModelScores(N, mbs, cvModelBuilders);

      _job.setReadyForView(true);
      DKV.put(_job);
    } catch (Exception e) {
      // On failure, delete everything the CV run put into the DKV so no
      // half-built state leaks.
      if (cvModelBuilders != null) {
        Futures fs =
new Futures();
        // removing keys added during cv_makeFramesAndBuilders and cv_makeFramesAndBuilders
        // need a better solution: part of this is done in cv_makeFramesAndBuilders but partially and only for its method scope
        // also removing the completed CV models as the main model is incomplete anyway
        for (ModelBuilder mb : cvModelBuilders) {
          DKV.remove(mb._parms._train, fs);
          DKV.remove(mb._parms._valid, fs);
          DKV.remove(Key.make(mb.getPredictionKey()), fs);
          mb._result.remove(fs);
        }
        fs.blockForPending();
      }
      throw e;
    } finally {
      // Per-fold and overall temp cleanup runs regardless of success.
      if (cvModelBuilders != null) {
        for (ModelBuilder mb : cvModelBuilders) {
          mb.cleanUp();
        }
      }
      cleanUp();
      Scope.exit();
    }
  }

  // Step 1: Assign each row to a fold
  // TODO: Implement better splitting algo (with Strata if response is
  // categorical), e.g. http://www.lexjansen.com/scsug/2009/Liang_Xie2.pdf
  public Vec cv_AssignFold(int N) {
    assert(N>=2);
    // A user-supplied fold column wins over generated assignments; it must be
    // integer-valued and contiguous (0..N-1 or 1..N).
    Vec fold = train().vec(_parms._fold_column);
    if( fold != null ) {
      if( !fold.isInt() ||
          (!(fold.min() == 0 && fold.max() == N-1) &&
           !(fold.min() == 1 && fold.max() == N  ) )) // Allow 0 to N-1, or 1 to N
        throw new H2OIllegalArgumentException("Fold column must be either categorical or contiguous integers from 0..N-1 or 1..N");
      return fold;
    }
    final long seed = _parms.getOrMakeRealSeed();
    Log.info("Creating " + N + " cross-validation splits with random number seed: " + seed);
    switch( _parms._fold_assignment ) {
    case AUTO:
    case Random:     return AstKFold.          kfoldColumn(train().anyVec().makeZero(),N,seed);
    case Modulo:     return AstKFold.    moduloKfoldColumn(train().anyVec().makeZero(),N     );
    case Stratified: return AstKFold.stratifiedKFoldColumn(response(),N,seed);
    default:         throw H2O.unimpl();
    }
  }

  // Step 2: Make 2*N binary weight vectors
  public Vec[] cv_makeWeights( final int N, Vec foldAssignment ) {
    String origWeightsName = _parms._weights_column;
    // With no user weights, every row gets weight 1.0.
    Vec origWeight  = origWeightsName != null ?
train().vec(origWeightsName) : train().anyVec().makeCon(1.0);
    Frame folds_and_weights = new Frame(foldAssignment, origWeight);
    // For each fold f produce a (train, holdout) weight pair: a row in fold f
    // gets weight 0 in fold f's training vector and its original weight in
    // fold f's holdout vector; all other rows the opposite.
    Vec[] weights = new MRTask() {
        @Override public void map(Chunk chks[], NewChunk nchks[]) {
          Chunk fold = chks[0], orig = chks[1];
          for( int row=0; row< orig._len; row++ ) {
            int foldIdx = (int)fold.at8(row) % N;
            double w = orig.atd(row);
            for( int f = 0; f < N; f++ ) {
              boolean holdout = foldIdx == f;
              nchks[2 * f].addNum(holdout ? 0 : w);
              nchks[2*f+1].addNum(holdout ? w : 0);
            }
          }
        }
      }.doAll(2*N,Vec.T_NUM,folds_and_weights).outputFrame().vecs();
    if (_parms._keep_cross_validation_fold_assignment)
      DKV.put(new Frame(Key.<Frame>make("cv_fold_assignment_" + _result.toString()), new String[]{"fold_assignment"}, new Vec[]{foldAssignment}));
    if( _parms._fold_column == null && !_parms._keep_cross_validation_fold_assignment) foldAssignment.remove();
    if( origWeightsName == null ) origWeight.remove(); // Cleanup temp

    // A constant weight vector means some fold got no rows (or all rows).
    for( Vec weight : weights )
      if( weight.isConst() )
        throw new H2OIllegalArgumentException("Not enough data to create " + N + " random cross-validation splits. Either reduce nfolds, specify a larger dataset (or specify another random number seed, if applicable).");
    return weights;
  }

  // Step 3: Build N train & validation frames; build N ModelBuilders; error check them all
  public ModelBuilder<M, P, O>[] cv_makeFramesAndBuilders( int N, Vec[] weights ) {
    final long old_cs = _parms.checksum();
    final String origDest = _result.toString();
    final String weightName = "__internal_cv_weights__";
    if (train().find(weightName) != -1) throw new H2OIllegalArgumentException("Frame cannot contain a Vec called '" + weightName + "'.");

    Frame cv_fr = new Frame(train().names(),train().vecs());
    if( _parms._weights_column!=null ) cv_fr.remove( _parms._weights_column ); // The CV frames will have their own private weight column

    ModelBuilder<M, P, O>[] cvModelBuilders = new ModelBuilder[N];
    List<Frame> cvFramesForFailedModels = new ArrayList<>();
    for( int i=0; i<N; i++ ) {
      String identifier = origDest + "_cv_" + (i+1);
      // Training/Validation share the same data, but will have exclusive weights
      Frame cvTrain = new Frame(Key.<Frame>make(identifier+"_train"),cv_fr.names(),cv_fr.vecs());
      cvTrain.add(weightName, weights[2*i]);
      DKV.put(cvTrain);
      Frame cvValid = new Frame(Key.<Frame>make(identifier+"_valid"),cv_fr.names(),cv_fr.vecs());
      cvValid.add(weightName, weights[2*i+1]);
      DKV.put(cvValid);

      // Shallow clone - not everything is a private copy!!!
ModelBuilder<M, P, O> cv_mb = (ModelBuilder)this.clone();
      cv_mb.setTrain(cvTrain);
      cv_mb._result = Key.make(identifier); // Each submodel gets its own key
      cv_mb._parms = (P) _parms.clone();
      // Fix up some parameters of the clone
      cv_mb._parms._is_cv_model = true;
      cv_mb._parms._weights_column = weightName;// All submodels have a weight column, which the main model does not
      cv_mb._parms.setTrain(cvTrain._key);      // All submodels have a weight column, which the main model does not
      cv_mb._parms._valid = cvValid._key;
      cv_mb._parms._fold_assignment = Model.Parameters.FoldAssignmentScheme.AUTO;
      cv_mb._parms._nfolds = 0; // Each submodel is not itself folded
      cv_mb.clearValidationErrors(); // each submodel gets its own validation messages and error_count()

      // Error-check all the cross-validation Builders before launching any
      cv_mb.init(false);
      if( cv_mb.error_count() > 0 ) { // Gather all submodel error messages
        // Remember this fold's frames so they get removed before we fail.
        Log.info("Marking frame for failed cv model for removal: " + cvTrain._key);
        cvFramesForFailedModels.add(cvTrain);
        Log.info("Marking frame for failed cv model for removal: " + cvValid._key);
        cvFramesForFailedModels.add(cvValid);

        for (ValidationMessage vm : cv_mb._messages)
          message(vm._log_level, vm._field_name, vm._message);
      }
      cvModelBuilders[i] = cv_mb;
    }

    if( error_count() > 0 ) {               // Found an error in one or more submodels
      // Clean up just the failed folds' private weight vecs and frames, then
      // surface all accumulated validation errors at once.
      Futures fs = new Futures();
      for (Frame cvf : cvFramesForFailedModels) {
        cvf.vec(weightName).remove(fs);     // delete the Vec's chunks
        DKV.remove(cvf._key, fs);           // delete the Frame from the DKV, leaving its vecs
        Log.info("Removing frame for failed cv model: " + cvf._key);
      }
      fs.blockForPending();
      throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
    }

    // check that this Job's original _params haven't changed
    assert old_cs == _parms.checksum();
    return cvModelBuilders;
  }

  // Step 4: Run all the CV models and launch the main model
  public void cv_buildModels(int N, ModelBuilder<M, P, O>[] cvModelBuilders ) {
    bulkBuildModels("cross-validation",
_job, cvModelBuilders, nModelsInParallel(), 0 /*no job updates*/);
    cv_computeAndSetOptimalParameters(cvModelBuilders);
  }

  /**
   * Runs given model builders in bulk.
   *
   * @param modelType text description of group of models being built (for logging purposes)
   * @param job parent job (processing will be stopped if stop of a parent job was requested)
   * @param modelBuilders list of model builders to run in bulk
   * @param parallelization level of parallelization (how many models can be built at the same time)
   * @param updateInc update increment (0 = disable updates)
   */
  public static void bulkBuildModels(String modelType, Job job, ModelBuilder<?, ?, ?>[] modelBuilders,
                                     int parallelization, int updateInc) {
    final int N = modelBuilders.length;
    H2O.H2OCountedCompleter submodel_tasks[] = new H2O.H2OCountedCompleter[N];
    int nRunning=0;
    RuntimeException rt = null;
    for( int i=0; i<N; ++i ) {
      if (job.stop_requested() ) {
        Log.info("Skipping build of last "+(N-i)+" out of "+N+" "+modelType+" CV models");
        stopAll(submodel_tasks);
        throw new Job.JobCancelledException();
      }
      Log.info("Building " + modelType + " model " + (i + 1) + " / " + N + ".");
      modelBuilders[i]._start_time = System.currentTimeMillis();
      submodel_tasks[i] = H2O.submitTask(modelBuilders[i].trainModelImpl());
      if(++nRunning == parallelization) { //piece-wise advance in training the models
        // Drain the current batch: join the oldest still-running tasks of this
        // window before submitting more. First exception is remembered and
        // rethrown after the batch drains.
        while (nRunning > 0) try {
          submodel_tasks[i + 1 - nRunning--].join();
          if (updateInc > 0) job.update(updateInc); // One job finished
        } catch (RuntimeException t) {
          if (rt == null) rt = t;
        }
        if(rt != null) throw rt;
      }
    }
    for( int i=0; i<N; ++i ) //all sub-models must be completed before the main model can be built
      try {
        final H2O.H2OCountedCompleter task = submodel_tasks[i];
        assert task != null;
        task.join();
      } catch(RuntimeException t){
        if (rt == null) rt = t;
      }
    if(rt != null) throw rt;
  }

  // Best-effort cancel of any already-submitted sub-model tasks.
  private static void stopAll(H2O.H2OCountedCompleter[] tasks) {
    for (H2O.H2OCountedCompleter task : tasks) {
      if (task != null) {
        task.cancel(true);
      }
    }
  }

  // Step 5: Score the CV models
  public ModelMetrics.MetricBuilder[] cv_scoreCVModels(int N, Vec[] weights, ModelBuilder<M, P, O>[] cvModelBuilders) {
    if (_job.stop_requested()) {
      Log.info("Skipping scoring of CV models");
      throw new Job.JobCancelledException();
    }
    assert weights.length == 2*N;
    assert cvModelBuilders.length == N;
    Log.info("Scoring the "+N+" CV models");
    ModelMetrics.MetricBuilder[] mbs = new ModelMetrics.MetricBuilder[N];
    Futures fs = new Futures();
    for (int i=0; i<N; ++i) {
      if (_job.stop_requested()) {
        Log.info("Skipping scoring for last "+(N-i)+" out of "+N+" CV models");
        throw new Job.JobCancelledException();
      }
      Frame cvValid = cvModelBuilders[i].valid();
      Frame adaptFr = new Frame(cvValid);
      M cvModel = cvModelBuilders[i].dest().get();
      cvModel.adaptTestForTrain(adaptFr, true, !isSupervised());
      mbs[i] = cvModel.scoreMetrics(adaptFr);
      // Holdout predictions are materialized only when actually needed.
      if (nclasses() == 2 /* need holdout predictions for gains/lift table */
          || _parms._keep_cross_validation_predictions
          || (_parms._distribution== DistributionFamily.huber /*need to compute quantiles on abs error of holdout predictions*/)) {
        String predName = cvModelBuilders[i].getPredictionKey();
        cvModel.predictScoreImpl(cvValid, adaptFr, predName, _job, true, CFuncRef.NOP);
        DKV.put(cvModel);
      }
      // free resources as early as possible
      if (adaptFr != null) {
        Frame.deleteTempFrameAndItsNonSharedVecs(adaptFr, cvValid);
        DKV.remove(adaptFr._key,fs);
      }
      DKV.remove(cvModelBuilders[i]._parms._train,fs);
      DKV.remove(cvModelBuilders[i]._parms._valid,fs);
      weights[2*i  ].remove(fs);
      weights[2*i+1].remove(fs);
    }
    fs.blockForPending();
    return mbs;
  }

  // Step 6: build the main model
  private void buildMainModel() {
    if (_job.stop_requested()) {
      Log.info("Skipping main model");
      throw new Job.JobCancelledException();
    }
    assert _job.isRunning();
    Log.info("Building main model.");
    _start_time = System.currentTimeMillis();
    H2O.H2OCountedCompleter mm = H2O.submitTask(trainModelImpl());
    mm.join();  // wait for completion
  }

  // Step 7: Combine cross-validation scores; compute main model x-val scores; compute gains/lifts
  public void cv_mainModelScores(int N, ModelMetrics.MetricBuilder mbs[], ModelBuilder<M, P, O> cvModelBuilders[]) {
    //never skipping CV main scores: we managed to reach last step and this should not be an expensive one, so let's offer this model
    M mainModel = _result.get();
    // Compute and put the cross-validation metrics into the main model
    Log.info("Computing "+N+"-fold cross-validation metrics.");
    Key<M>[] cvModKeys = new Key[N];
    mainModel._output._cross_validation_models = _parms._keep_cross_validation_models ? cvModKeys : null;
    Key<Frame>[] predKeys = new Key[N];
    mainModel._output._cross_validation_predictions = _parms._keep_cross_validation_predictions ? predKeys : null;
    for (int i = 0; i < N; ++i) {
      // Fold metric builders are reduced into mbs[0].
      if (i > 0) mbs[0].reduce(mbs[i]);
      cvModKeys[i] = cvModelBuilders[i]._result;
      predKeys[i] = Key.make(cvModelBuilders[i].getPredictionKey());
    }
    Frame holdoutPreds = null;
    if (_parms._keep_cross_validation_predictions || (nclasses()==2 /*GainsLift needs this*/ || _parms._distribution == DistributionFamily.huber)) {
      Key<Frame> cvhp = Key.make("cv_holdout_prediction_" + mainModel._key.toString());
      if (_parms._keep_cross_validation_predictions) //only show the user if they asked for it
        mainModel._output._cross_validation_holdout_predictions_frame_id = cvhp;
      holdoutPreds = combineHoldoutPredictions(predKeys, cvhp);
    }
    if (_parms._keep_cross_validation_fold_assignment) {
      mainModel._output._cross_validation_fold_assignment_frame_id = Key.make("cv_fold_assignment_" + _result.toString());
      Frame xvalidation_fold_assignment_frame = mainModel._output._cross_validation_fold_assignment_frame_id.get();
      if (xvalidation_fold_assignment_frame != null)
        Scope.untrack(xvalidation_fold_assignment_frame.keysList());
    }
    // Keep or toss predictions
    if (_parms._keep_cross_validation_predictions) {
      for (Key<Frame> k : predKeys) {
        Frame fr = DKV.getGet(k);
        if (fr != null) Scope.untrack(fr.keysList());
      }
    } else {
      int count =
Model.deleteAll(predKeys);
      Log.info(count+" CV predictions were removed");
    }
    mainModel._output._cross_validation_metrics = mbs[0].makeModelMetrics(mainModel, _parms.train(), null, holdoutPreds);
    if (holdoutPreds != null) {
      if (_parms._keep_cross_validation_predictions) Scope.untrack(holdoutPreds.keysList());
      else holdoutPreds.remove();
    }
    mainModel._output._cross_validation_metrics._description = N + "-fold cross-validation on training data (Metrics computed for combined holdout predictions)";
    Log.info(mainModel._output._cross_validation_metrics.toString());
    mainModel._output._cross_validation_metrics_summary = makeCrossValidationSummaryTable(cvModKeys);

    if (!_parms._keep_cross_validation_models) {
      int count = Model.deleteAll(cvModKeys);
      Log.info(count+" CV models were removed");
    }

    // Now, the main model is complete (has cv metrics)
    DKV.put(mainModel);
  }

  // DKV key under which a CV submodel's holdout predictions are stored.
  private String getPredictionKey() {
    return "prediction_"+_result.toString();
  }

  /** Override for model-specific checks / modifications to _parms for the main model during N-fold cross-validation.
   *  Also allow the cv models to be modified after all of them have been built.
   *  For example, the model might need to be told to not do early stopping. CV models might have their lambda value modified, etc.
   */
  public void cv_computeAndSetOptimalParameters(ModelBuilder<M, P, O>[] cvModelBuilders) { }

  /** @return Whether n-fold cross-validation is done  */
  public boolean nFoldCV() {
    return _parms._fold_column != null || _parms._nfolds != 0;
  }

  /** List containing the categories of models that this builder can
   *  build.  Each ModelBuilder must have one of these.
*/
  abstract public ModelCategory[] can_build();

  /** Visibility for this algo: is it always visible, is it beta (always
   *  visible but with a note in the UI) or is it experimental (hidden by
   *  default, visible in the UI if the user gives an "experimental" flag at
   *  startup); test-only builders are "experimental"  */
  public enum BuilderVisibility {
    Experimental, Beta, Stable;

    /**
     * Case-insensitive lookup of a {@link BuilderVisibility} member by name.
     *
     * @param value A value to search for among {@link BuilderVisibility}'s values
     * @return A member of {@link BuilderVisibility}, if found.
     * @throws IllegalArgumentException If given value is not found among members of {@link BuilderVisibility} enum.
     */
    public static BuilderVisibility valueOfIgnoreCase(final String value) throws IllegalArgumentException {
      for (final BuilderVisibility candidate : values()) {
        if (candidate.name().equalsIgnoreCase(value))
          return candidate;
      }
      throw new IllegalArgumentException(String.format("Algorithm availability level of '%s' is not known. Available levels: %s", value, Arrays.toString(values())));
    }
  }

  /** Default availability level; subclasses override for beta/experimental algos. */
  public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; }

  /** Clear whatever was done by init() so it can be run again.
*/
  public void clearInitState() {
    clearValidationErrors();
  }
  // Whether init() should log its progress; subclasses may silence it.
  protected boolean logMe() { return true; }

  abstract public boolean isSupervised();

  // Handy cached columns, populated by separateFeatureVecs() during init;
  // transient because they are derived from the (re-fetchable) training frame.
  protected transient Vec _response; // Handy response column
  protected transient Vec _vresponse; // Handy response column
  protected transient Vec _offset; // Handy offset column
  protected transient Vec _weights; // observation weight column
  protected transient Vec _fold; // fold id column
  protected transient String[] _origNames; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
  protected transient String[][] _origDomains; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame

  public boolean hasOffsetCol(){ return _parms._offset_column != null;} // don't look at transient Vec
  public boolean hasWeightCol(){return _parms._weights_column != null;} // don't look at transient Vec
  public boolean hasFoldCol(){return _parms._fold_column != null;} // don't look at transient Vec
  // Count of non-feature columns (offset + weights + fold).
  public int numSpecialCols() { return (hasOffsetCol() ? 1 : 0) + (hasWeightCol() ? 1 : 0) + (hasFoldCol() ?
1 : 0); }
  // Names of the special (non-feature) columns, in offset/weights/fold order.
  public String[] specialColNames() {
    String[] n = new String[numSpecialCols()];
    int i=0;
    if (hasOffsetCol()) n[i++]=_parms._offset_column;
    if (hasWeightCol()) n[i++]=_parms._weights_column;
    if (hasFoldCol()) n[i++]=_parms._fold_column;
    return n;
  }
  // no hasResponse, call isSupervised instead (response is mandatory if isSupervised is true)

  // Per-algo capability flags; subclasses override when they support export.
  public boolean havePojo() { return false; }
  public boolean haveMojo() { return false; }

  protected int _nclass; // Number of classes; 1 for regression; 2+ for classification

  public int nclasses(){return _nclass;}

  public final boolean isClassifier() { return nclasses() > 1; }

  /**
   * Find and set response/weights/offset/fold and put them all in the end,
   * @return number of non-feature vecs
   */
  public int separateFeatureVecs() {
    int res = 0;
    // Each special column is removed, validated, cached in its transient Vec,
    // and re-added so it ends up at the end of the training frame.
    if(_parms._weights_column != null) {
      Vec w = _train.remove(_parms._weights_column);
      if(w == null)
        error("_weights_column","Weights column '" + _parms._weights_column  + "' not found in the training frame");
      else {
        if(!w.isNumeric())
          error("_weights_column","Invalid weights column '" + _parms._weights_column  + "', weights must be numeric");
        _weights = w;
        if(w.naCnt() > 0)
          error("_weights_columns","Weights cannot have missing values.");
        if(w.min() < 0)
          error("_weights_columns","Weights must be >= 0");
        if(w.max() == 0)
          error("_weights_columns","Max. weight must be > 0");
        _train.add(_parms._weights_column, w);
        ++res;
      }
    } else {
      _weights = null;
      assert(!hasWeightCol());
    }
    if(_parms._offset_column != null) {
      Vec o = _train.remove(_parms._offset_column);
      if(o == null)
        error("_offset_column","Offset column '" + _parms._offset_column  + "' not found in the training frame");
      else {
        if(!o.isNumeric())
          error("_offset_column","Invalid offset column '" + _parms._offset_column  + "', offset must be numeric");
        _offset = o;
        if(o.naCnt() > 0)
          error("_offset_column","Offset cannot have missing values.");
        if(_weights == _offset)
          error("_offset_column", "Offset must be different from weights");
        _train.add(_parms._offset_column, o);
        ++res;
      }
    } else {
      _offset = null;
      assert(!hasOffsetCol());
    }
    if(_parms._fold_column != null) {
      Vec f = _train.remove(_parms._fold_column);
      if(f == null)
        error("_fold_column","Fold column '" + _parms._fold_column  + "' not found in the training frame");
      else {
        if(!f.isInt() && !f.isCategorical())
          error("_fold_column","Invalid fold column '" + _parms._fold_column  + "', fold must be integer or categorical");
        if(f.min() < 0)
          error("_fold_column","Invalid fold column '" + _parms._fold_column  + "', fold must be non-negative");
        if(f.isConst())
          error("_fold_column","Invalid fold column '" + _parms._fold_column  + "', fold cannot be constant");
        _fold = f;
        if(f.naCnt() > 0)
          error("_fold_column","Fold cannot have missing values.");
        if(_fold == _weights)
          error("_fold_column", "Fold must be different from weights");
        if(_fold == _offset)
          error("_fold_column", "Fold must be different from offset");
        _train.add(_parms._fold_column, f);
        ++res;
      }
    } else {
      _fold = null;
      assert(!hasFoldCol());
    }
    if(isSupervised() && _parms._response_column != null) {
      _response = _train.remove(_parms._response_column);
      if (_response == null) {
        if (isSupervised())
          error("_response_column", "Response column '" + _parms._response_column + "' not found in the training frame");
      } else {
        if(_response == _offset)
          error("_response_column", "Response 
column must be different from offset_column"); if(_response == _weights) error("_response_column", "Response column must be different from weights_column"); if(_response == _fold) error("_response_column", "Response column must be different from fold_column"); _train.add(_parms._response_column, _response); ++res; } } else { _response = null; } return res; } protected boolean ignoreStringColumns() { return true; } protected boolean ignoreConstColumns() { return _parms._ignore_const_cols; } protected boolean ignoreUuidColumns() { return true; } /** * Ignore constant columns, columns with all NAs and strings. * @param npredictors * @param expensive */ protected void ignoreBadColumns(int npredictors, boolean expensive){ // Drop all-constant and all-bad columns. if(_parms._ignore_const_cols) new FilterCols(npredictors) { @Override protected boolean filter(Vec v) { boolean isBad = v.isBad(); boolean skipConst = ignoreConstColumns() && v.isConst(); boolean skipString = ignoreStringColumns() && v.isString(); boolean skipUuid = ignoreUuidColumns() && v.isUUID(); boolean skip = isBad || skipConst || skipString || skipUuid; return skip; } }.doIt(_train,"Dropping bad and constant columns: ",expensive); } /** * Checks response variable attributes and adds errors if response variable is unusable. */ protected void checkResponseVariable() { if (_response != null && (!_response.isNumeric() && !_response.isCategorical() && !_response.isTime())) { error("_response_column", "Use numerical, categorical or time variable. Currently used " + _response.get_type_str()); } } /** * Ignore invalid columns (columns that have a very high max value, which can cause issues in DHistogram) * @param npredictors * @param expensive */ protected void ignoreInvalidColumns(int npredictors, boolean expensive){} /** * Makes sure the final model will fit in memory. * * Note: This method should not be overridden (override checkMemoryFootPrint_impl instead). 
 * It is
 * not declared 'final' to not to break 3rd party implementations. It might be declared final in the future
 * if necessary.
 */
protected void checkMemoryFootPrint() {
  if (Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.noMemoryCheck")) return; // skip check if disabled
  checkMemoryFootPrint_impl();
}

/**
 * Override this method to call error() if the model is expected to not fit in memory, and say why
 */
protected void checkMemoryFootPrint_impl() {}

transient double [] _distribution;     // per-class counts (or single aggregate for regression); filled in init()
transient protected double [] _priorClassDist; // relative prior class distribution; filled in init()

protected boolean computePriorClassDistribution(){
  return isClassifier();
}

/** A list of field validation issues. */
public ValidationMessage[] _messages = new ValidationMessage[0];
private int _error_count = -1; // -1 ==> init not run yet, for those Jobs that have an init, like ModelBuilder. Note, this counts ONLY errors, not WARNs and etc.

public int error_count() { assert _error_count >= 0 : "init() not run yet"; return _error_count; }

public void hide (String field_name, String message) { message(Log.TRACE, field_name, message); }
public void info (String field_name, String message) { message(Log.INFO , field_name, message); }
public void warn (String field_name, String message) { message(Log.WARN , field_name, message); }
// NOTE(review): message(Log.ERRR, ...) below already increments _error_count for ERRR-level
// messages, so the extra increment here appears to count each error twice — confirm intended.
public void error(String field_name, String message) { message(Log.ERRR , field_name, message); _error_count++; }

public void clearValidationErrors() {
  _messages = new ValidationMessage[0];
  _error_count = 0;
}

/** Appends a validation message (grow-by-one array copy) and bumps the error count for ERRR. */
public void message(byte log_level, String field_name, String message) {
  _messages = Arrays.copyOf(_messages, _messages.length + 1);
  _messages[_messages.length - 1] = new ValidationMessage(log_level, field_name, message);
  if (log_level == Log.ERRR) _error_count++;
}

/** Get a string representation of only the ERROR ValidationMessages (e.g., to use in an exception throw).
 */
public String validationErrors() {
  StringBuilder sb = new StringBuilder();
  for( ValidationMessage vm : _messages )
    if( vm._log_level == Log.ERRR )
      sb.append(vm.toString()).append("\n");
  return sb.toString();
}

/** Can be an ERROR, meaning the parameters can't be used as-is,
 *  a TRACE, which means the specified field should be hidden given
 *  the values of other fields, or a WARN or INFO for informative
 *  messages to the user. */
public static final class ValidationMessage extends Iced {
  final byte _log_level; // See util/Log.java for levels
  final String _field_name;
  final String _message;

  public ValidationMessage(byte log_level, String field_name, String message) {
    _log_level = log_level;
    _field_name = field_name;
    _message = message;
    Log.log(log_level,field_name + ": " + message); // also emit to the H2O log immediately
  }

  public int log_level() { return _log_level; }

  @Override public String toString() { return Log.LVLS[_log_level] + " on field: " + _field_name + ": " + _message; }
}

// ==========================================================================
/** Initialize the ModelBuilder, validating all arguments and preparing the
 *  training frame.  This call is expected to be overridden in the subclasses
 *  and each subclass will start with "super.init();".  This call is made by
 *  the front-end whenever the GUI is clicked, and needs to be fast whenever
 *  {@code expensive} is false; it will be called once again at the start of
 *  model building {@see #trainModel()} with expensive set to true.
 *<p>
 *  The incoming training frame (and validation frame) will have ignored
 *  columns dropped out, plus whatever work the parent init did.
 *<p>
 *  NOTE: The front end initially calls this through the parameters validation
 *  endpoint with no training_frame, so each subclass's {@code init()} method
 *  has to work correctly with the training_frame missing.
 *<p>
 */
public void init(boolean expensive) {
  // Log parameters
  if( expensive && logMe() ) {
    Log.info("Building H2O " + this.getClass().getSimpleName() + " model with these parameters:");
    Log.info(new String(_parms.writeJSON(new AutoBuffer()).buf()));
  }
  // NOTE: allow re-init:
  clearInitState();
  assert _parms != null; // Parms must already be set in
  if( _parms._train == null ) {
    if (expensive) error("_train", "Missing training frame");
    return;
  }
  Frame tr = _train != null?_train:_parms.train();
  if( tr == null ) { error("_train", "Missing training frame: "+_parms._train); return; }
  // Work on a private copy of the frame header so column removals don't touch the user's frame.
  setTrain(new Frame(null /* not putting this into KV */, tr._names.clone(), tr.vecs().clone()));
  if (expensive) {
    _parms.getOrMakeRealSeed();
  }
  if (_parms._categorical_encoding.needsResponse() && !isSupervised()) {
    error("_categorical_encoding", "Categorical encoding scheme cannot be " + _parms._categorical_encoding.toString() + " - no response column available.");
  }
  // --- cross-validation parameter validation ---
  if (_parms._nfolds < 0 || _parms._nfolds == 1) {
    error("_nfolds", "nfolds must be either 0 or >1.");
  }
  if (_parms._nfolds > 1 && _parms._nfolds > train().numRows()) {
    error("_nfolds", "nfolds cannot be larger than the number of rows (" + train().numRows() + ").");
  }
  if (_parms._fold_column != null) {
    hide("_fold_assignment", "Fold assignment is ignored when a fold column is specified.");
    if (_parms._nfolds > 1) {
      error("_nfolds", "nfolds cannot be specified at the same time as a fold column.");
    } else {
      hide("_nfolds", "nfolds is ignored when a fold column is specified.");
    }
    if (_parms._fold_assignment != Model.Parameters.FoldAssignmentScheme.AUTO) {
      error("_fold_assignment", "Fold assignment is not allowed in conjunction with a fold column.");
    }
  }
  if (_parms._nfolds > 1) {
    hide("_fold_column", "Fold column is ignored when nfolds > 1.");
  }
  // hide cross-validation parameters unless cross-val is enabled
  if (!nFoldCV()) {
    hide("_keep_cross_validation_models", "Only for cross-validation.");
    hide("_keep_cross_validation_predictions", "Only for cross-validation.");
    hide("_keep_cross_validation_fold_assignment", "Only for cross-validation.");
    hide("_fold_assignment", "Only for cross-validation.");
    if (_parms._fold_assignment != Model.Parameters.FoldAssignmentScheme.AUTO) {
      error("_fold_assignment", "Fold assignment is only allowed for cross-validation.");
    }
  }
  // --- distribution parameter validation ---
  if (_parms._distribution == DistributionFamily.modified_huber) {
    error("_distribution", "Modified Huber distribution is not supported yet.");
  }
  if (_parms._distribution != DistributionFamily.tweedie) {
    hide("_tweedie_power", "Only for Tweedie Distribution.");
  }
  if (_parms._tweedie_power <= 1 || _parms._tweedie_power >= 2) {
    error("_tweedie_power", "Tweedie power must be between 1 and 2 (exclusive).");
  }
  // Drop explicitly dropped columns
  if( _parms._ignored_columns != null ) {
    _train.remove(_parms._ignored_columns);
    if( expensive ) Log.info("Dropping ignored columns: "+Arrays.toString(_parms._ignored_columns));
  }
  if(_parms._checkpoint != null){
    if(DKV.get(_parms._checkpoint) == null){
      error("_checkpoint", "Checkpoint has to point to existing model!");
    }
    // Do not ignore bad columns, as only portion of the training data might be supplied (e.g. continue from checkpoint)
    final Model checkpointedModel = _parms._checkpoint.get();
    final String[] warnings = checkpointedModel.adaptTestForTrain(_train, expensive, false);
    for (final String warning : warnings){
      warn("_checkpoint", warning);
    }
    separateFeatureVecs(); // set MB's fields (like response)
  } else {
    // Drop all non-numeric columns (e.g., String and UUID).  No current algo
    // can use them, and otherwise all algos will then be forced to remove
    // them.  Text algos (grep, word2vec) take raw text columns - which are
    // numeric (arrays of bytes).
    ignoreBadColumns(separateFeatureVecs(), expensive);
    ignoreInvalidColumns(separateFeatureVecs(), expensive);
    checkResponseVariable();
  }
  // Rebalance train and valid datasets (after invalid/bad columns are dropped)
  if (expensive && error_count() == 0 && _parms._auto_rebalance) {
    setTrain(rebalance(_train, false, _result + ".temporary.train"));
    separateFeatureVecs(); // need to reset MB's fields (like response) after rebalancing
    _valid = rebalance(_valid, false, _result + ".temporary.valid");
  }
  // Check that at least some columns are not-constant and not-all-NAs
  if (_train.numCols() == 0)
    error("_train", "There are no usable columns to generate model");
  if(isSupervised()) {
    if(_response != null) {
      if (_parms._distribution != DistributionFamily.tweedie) {
        hide("_tweedie_power", "Tweedie power is only used for Tweedie distribution.");
      }
      if (_parms._distribution != DistributionFamily.quantile) {
        hide("_quantile_alpha", "Quantile (alpha) is only used for Quantile regression.");
      }
      if (expensive) checkDistributions();
      _nclass = _response.isCategorical() ? _response.cardinality() : 1;
      if (_parms._distribution == DistributionFamily.quasibinomial) {
        _nclass = 2; // quasibinomial is always treated as two-class
      }
      if (_parms._check_constant_response && _response.isConst()) {
        error("_response", "Response cannot be constant.");
      }
    }
    if (! _parms._balance_classes)
      hide("_max_after_balance_size", "Balance classes is false, hide max_after_balance_size");
    else if (_parms._weights_column != null && _weights != null && !_weights.isBinary())
      error("_balance_classes", "Balance classes and observation weights are not currently supported together.");
    if( _parms._max_after_balance_size <= 0.0 )
      error("_max_after_balance_size","Max size after balancing needs to be positive, suggest 1.0f");
    if( _train != null ) {
      if (_train.numCols() <= 1)
        error("_train", "Training data must have at least 2 features (incl. response).");
      if( null == _parms._response_column) {
        error("_response_column", "Response column parameter not set.");
        return;
      }
      // Compute the prior class distribution (classification) or aggregate weight (regression).
      if(_response != null && computePriorClassDistribution()) {
        if (isClassifier() && isSupervised() && _parms._distribution != DistributionFamily.quasibinomial) {
          MRUtils.ClassDist cdmt = _weights != null ? new MRUtils.ClassDist(nclasses()).doAll(_response, _weights) : new MRUtils.ClassDist(nclasses()).doAll(_response);
          _distribution = cdmt.dist();
          _priorClassDist = cdmt.rel_dist();
        } else { // Regression; only 1 "class"
          _distribution = new double[]{ (_weights != null ? _weights.mean() : 1.0) * train().numRows() };
          _priorClassDist = new double[]{1.0f};
        }
      }
    }
    if( !isClassifier() ) {
      hide("_balance_classes", "Balance classes is only applicable to classification problems.");
      hide("_class_sampling_factors", "Class sampling factors is only applicable to classification problems.");
      hide("_max_after_balance_size", "Max after balance size is only applicable to classification problems.");
      hide("_max_confusion_matrix_size", "Max confusion matrix size is only applicable to classification problems.");
    }
    if (_nclass <= 2) {
      hide("_max_hit_ratio_k", "Max K-value for hit ratio is only applicable to multi-class classification problems.");
      hide("_max_confusion_matrix_size", "Only for multi-class classification problems.");
    }
    if( !_parms._balance_classes ) {
      hide("_max_after_balance_size", "Only used with balanced classes");
      hide("_class_sampling_factors", "Class sampling factors is only applicable if balancing classes.");
    }
  } else {
    hide("_response_column", "Ignored for unsupervised methods.");
    hide("_balance_classes", "Ignored for unsupervised methods.");
    hide("_class_sampling_factors", "Ignored for unsupervised methods.");
    hide("_max_after_balance_size", "Ignored for unsupervised methods.");
    hide("_max_confusion_matrix_size", "Ignored for unsupervised methods.");
    _response = null;
    _vresponse = null;
    _nclass = 1;
  }
  if( _nclass > Model.Parameters.MAX_SUPPORTED_LEVELS ) {
    error("_nclass", "Too many levels in response column: " + _nclass + ", maximum supported number of classes is " + Model.Parameters.MAX_SUPPORTED_LEVELS + ".");
  }
  // Build the validation set to be compatible with the training set.
  // Toss out extra columns, complain about missing ones, remap categoricals
  Frame va = _parms.valid();  // User-given validation set
  if (va != null) {
    _valid = adaptFrameToTrain(va, "Validation Frame", "_validation_frame", expensive);
    _vresponse = _valid.vec(_parms._response_column);
  } else {
    _valid = null;
    _vresponse = null;
  }
  if (expensive) {
    // Categorical encoding may replace the training frame entirely; remember the original schema.
    Frame newtrain = encodeFrameCategoricals(_train, ! _parms._is_cv_model);
    if (newtrain != _train) {
      _origNames = _train.names();
      _origDomains = _train.domains();
      setTrain(newtrain);
      separateFeatureVecs(); //fix up the pointers to the special vecs
    }
    if (_valid != null) {
      _valid = encodeFrameCategoricals(_valid, ! _parms._is_cv_model /* for CV, need to score one more time in outer loop */);
      _vresponse = _valid.vec(_parms._response_column);
    }
    // Optionally reorder categorical levels by mean (weighted) response for better splits.
    boolean restructured = false;
    Vec[] vecs = _train.vecs();
    for (int j = 0; j < vecs.length; ++j) {
      Vec v = vecs[j];
      if (v == _response || v == _fold) continue;
      if (v.isCategorical() && shouldReorder(v)) {
        final int len = v.domain().length;
        Log.info("Reordering categorical column " + _train.name(j) + " (" + len + " levels) based on the mean (weighted) response per level.");
        VecUtils.MeanResponsePerLevelTask mrplt = new VecUtils.MeanResponsePerLevelTask(len).doAll(v,
                _parms._weights_column != null ? _train.vec(_parms._weights_column) : v.makeCon(1.0),
                _train.vec(_parms._response_column));
        double[] meanWeightedResponse = mrplt.meanWeightedResponse;
//        for (int i=0;i<len;++i)
//          Log.info(v.domain()[i] + " -> " + meanWeightedResponse[i]);
        // Option 1: Order the categorical column by response to make better splits
        int[] idx=new int[len];
        for (int i=0;i<len;++i) idx[i] = i;
        ArrayUtils.sort(idx, meanWeightedResponse);
        int[] invIdx=new int[len];
        for (int i=0;i<len;++i) invIdx[idx[i]] = i;
        Vec vNew = new VecUtils.ReorderTask(invIdx).doAll(1, Vec.T_NUM, new Frame(v)).outputFrame().anyVec();
        String[] newDomain = new String[len];
        for (int i = 0; i < len; ++i) newDomain[i] = v.domain()[idx[i]];
        vNew.setDomain(newDomain);
//        for (int i=0;i<len;++i)
//          Log.info(vNew.domain()[i] + " -> " + meanWeightedResponse[idx[i]]);
        vecs[j] = vNew;
        restructured = true;
      }
    }
    if (restructured)
      _train.restructure(_train.names(), vecs);
  }
  assert (!expensive || _valid==null || Arrays.equals(_train._names, _valid._names) || _parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.Binary);
  if (_valid!=null && !Arrays.equals(_train._names, _valid._names) && _parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.Binary) {
    for (String name : _train._names)
      assert(ArrayUtils.contains(_valid._names, name)) : "Internal error during categorical encoding: training column " + name + " not in validation frame with columns " + Arrays.toString(_valid._names);
  }
  // --- early-stopping parameter validation ---
  if (_parms._stopping_tolerance < 0) {
    error("_stopping_tolerance", "Stopping tolerance must be >= 0.");
  }
  if (_parms._stopping_tolerance >= 1) {
    error("_stopping_tolerance", "Stopping tolerance must be < 1.");
  }
  if (_parms._stopping_rounds == 0) {
    if (_parms._stopping_metric != ScoreKeeper.StoppingMetric.AUTO)
      warn("_stopping_metric", "Stopping metric is ignored for _stopping_rounds=0.");
    if (_parms._stopping_tolerance != _parms.defaultStoppingTolerance())
      warn("_stopping_tolerance", "Stopping tolerance is ignored for _stopping_rounds=0.");
  } else if (_parms._stopping_rounds < 0) {
    error("_stopping_rounds", "Stopping rounds must be >= 0.");
  } else {
    if (isClassifier()) {
      if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.deviance && !getClass().getSimpleName().contains("GLM")) {
        error("_stopping_metric", "Stopping metric cannot be deviance for classification.");
      }
      if (nclasses()!=2 && _parms._stopping_metric == ScoreKeeper.StoppingMetric.AUC) {
        error("_stopping_metric", "Stopping metric cannot be AUC for multinomial classification.");
      }
    } else {
      if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.misclassification ||
              _parms._stopping_metric == ScoreKeeper.StoppingMetric.AUC ||
              _parms._stopping_metric == ScoreKeeper.StoppingMetric.logloss) {
        error("_stopping_metric", "Stopping metric cannot be " + _parms._stopping_metric.toString() + " for regression.");
      }
    }
  }
  if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.custom || _parms._stopping_metric == ScoreKeeper.StoppingMetric.custom_increasing) {
    if (_parms._custom_metric_func == null) {
      error("_stopping_metric", "Custom metric function needs to be defined in order to use it for early stopping.");
    }
  }
  if (_parms._max_runtime_secs < 0) {
    error("_max_runtime_secs", "Max runtime (in seconds) must be greater than 0 (or 0 for unlimited).");
  }
  if (!StringUtils.isNullOrEmpty(_parms._export_checkpoints_dir)) {
    if(!H2O.getPM().isWritableDirectory(_parms._export_checkpoints_dir)) {
      // NOTE(review): "Checpoints" typo in this user-facing message — left as-is (runtime string).
      error("_export_checkpoints_dir", "Checpoints directory path must point to a writable path.");
    }
  }
}

/**
 * Adapts a given frame to the same schema as the training frame.
 * This includes encoding of categorical variables (if expensive is enabled).
 *
 * Note: This method should only be used during ModelBuilder initialization - it should be called in init(..) method.
 *
 * @param fr input frame
 * @param frDesc frame description, eg.
 * "Validation Frame" - will be shown in validation error messages
 * @param field name of a field for validation errors
 * @param expensive indicates full ("expensive") processing
 * @return adapted frame
 */
protected Frame init_adaptFrameToTrain(Frame fr, String frDesc, String field, boolean expensive) {
  Frame adapted = adaptFrameToTrain(fr, frDesc, field, expensive);
  if (expensive)
    adapted = encodeFrameCategoricals(adapted, true); // also apply the training frame's categorical encoding
  return adapted;
}

// Aligns a user-supplied frame (e.g. validation frame) with the training frame's schema;
// records validation errors/warnings on this builder instead of throwing.
private Frame adaptFrameToTrain(Frame fr, String frDesc, String field, boolean expensive) {
  if (fr.numRows()==0) error(field, frDesc + " must have > 0 rows.");
  // Work on a private header copy so adaptation doesn't mutate the user's frame.
  Frame adapted = new Frame(null /* not putting this into KV */, fr._names.clone(), fr.vecs().clone());
  try {
    String[] msgs = Model.adaptTestForTrain(adapted, null, null, _train._names, _train.domains(), _parms, expensive, true, null, getToEigenVec(), _toDelete, false);
    Vec response = adapted.vec(_parms._response_column);
    if (response == null && _parms._response_column != null)
      error(field, frDesc + " must have a response column '" + _parms._response_column + "'.");
    if (expensive) {
      for (String s : msgs) {
        Log.info(s);
        warn(field, s);
      }
    }
  } catch (IllegalArgumentException iae) {
    error(field, iae.getMessage()); // surface adaptation failures as field validation errors
  }
  return adapted;
}

// Applies the configured categorical encoding to fr, skipping the special columns.
// The encoded frame is either Scope-tracked or queued in _toDelete for later cleanup.
private Frame encodeFrameCategoricals(Frame fr, boolean scopeTrack) {
  String[] skipCols = new String[]{_parms._weights_column, _parms._offset_column, _parms._fold_column, _parms._response_column};
  Frame encoded = FrameUtils.categoricalEncoder(fr, skipCols, _parms._categorical_encoding, getToEigenVec(), _parms._max_categorical_levels);
  if (encoded != fr) {
    assert encoded._key != null;
    if (scopeTrack)
      Scope.track(encoded);
    else
      _toDelete.put(encoded._key, Arrays.toString(Thread.currentThread().getStackTrace()));
  }
  return encoded;
}

/**
 * Rebalance a frame for load balancing
 * @param original_fr Input frame
 * @param local Whether to only create enough chunks to max out all cores on one node only
 *              WARNING: This behavior is not actually
 *              implemented in the methods defined in this class, the default logic
 *              doesn't take this parameter into consideration.
 * @param name Name of rebalanced frame
 * @return Frame that has potentially more chunks
 */
protected Frame rebalance(final Frame original_fr, boolean local, final String name) {
  if (original_fr == null) return null;
  int chunks = desiredChunks(original_fr, local);
  double rebalanceRatio = rebalanceRatio();
  int nonEmptyChunks = original_fr.anyVec().nonEmptyChunks();
  if (nonEmptyChunks >= chunks * rebalanceRatio) {
    // Already enough (non-empty) chunks: no rebalance needed.
    // NOTE(review): the ',' after '+ chunks' splits this Log.info call into two arguments
    // rather than one concatenated string — likely a typo for '+'; confirm against Log.info's signature.
    if (chunks>1)
      Log.info(name.substring(name.length()-5)+ " dataset already contains " + nonEmptyChunks + " (non-empty) " +
              " chunks. No need to rebalance. [desiredChunks=" + chunks, ", rebalanceRatio=" + rebalanceRatio + "]");
    return original_fr;
  }
  Log.info("Rebalancing " + name.substring(name.length()-5)  + " dataset into " + chunks + " chunks.");
  Key newKey = Key.makeUserHidden(name + ".chunks" + chunks);
  RebalanceDataSet rb = new RebalanceDataSet(original_fr, newKey, chunks);
  H2O.submitTask(rb).join(); // blocks until the rebalanced copy is in the DKV
  Frame rebalanced_fr = DKV.get(newKey).get();
  Scope.track(rebalanced_fr);
  return rebalanced_fr;
}

// Threshold factor (system property "rebalance.ratio.single"/"rebalance.ratio.multi", default 1.0)
// deciding whether the existing chunk count is already close enough to the desired count.
private double rebalanceRatio() {
  String mode = H2O.getCloudSize() == 1 ? "single" : "multi";
  String ratioStr = getSysProperty("rebalance.ratio." + mode, "1.0");
  return Double.parseDouble(ratioStr);
}

/**
 * Find desired number of chunks. If fewer, dataset will be rebalanced.
 * @return Lower bound on number of chunks after rebalancing.
 */
protected int desiredChunks(final Frame original_fr, boolean local) {
  if (H2O.getCloudSize() > 1 && Boolean.parseBoolean(getSysProperty("rebalance.enableMulti", "false")))
    return desiredChunkMulti(original_fr);
  else
    return desiredChunkSingle(original_fr);
}

// single-node version (original version)
private int desiredChunkSingle(final Frame originalFr) {
  return Math.min((int) Math.ceil(originalFr.numRows() / 1e3), H2O.NUMCPUS);
}

// multi-node version (experimental version)
private int desiredChunkMulti(final Frame fr) {
  // Only numeric/categorical frames are supported by this estimator; otherwise fall back.
  for (int type : fr.types()) {
    if (type != Vec.T_NUM && type != Vec.T_CAT) {
      Log.warn("Training frame contains columns non-numeric/categorical columns. Using old rebalance logic.");
      return desiredChunkSingle(fr);
    }
  }
  // estimate size of the Frame on disk as if it was represented in a binary _uncompressed_ format with no overhead
  long itemCnt = 0;
  for (Vec v : fr.vecs()) itemCnt += v.length() - v.naCnt();
  final int itemSize = 4; // magic constant size of both Numbers and Categoricals
  final long size = Math.max(itemCnt * itemSize, fr.byteSize());
  final int desiredChunkSize = FileVec.calcOptimalChunkSize(size, fr.numCols(), fr.numCols() * itemSize,
          H2O.NUMCPUS, H2O.getCloudSize(), false, true);
  // ceil(size / desiredChunkSize)
  final int desiredChunks = (int) ((size / desiredChunkSize) + (size % desiredChunkSize > 0 ? 1 : 0));
  Log.info("Calculated optimal number of chunks = " + desiredChunks);
  return desiredChunks;
}

// Reads a system property under the H2O prefix, with a default.
protected String getSysProperty(String name, String def) {
  return System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + name, def);
}

// Validates the response column (and related parameters) against the chosen distribution family.
public void checkDistributions() {
  if (_parms._distribution == DistributionFamily.quasibinomial) {
    if (_response.min() != 0)
      error("_response", "For quasibinomial distribution, response must have a low value of 0 (negative class), but instead has min value of " + _response.min() + ".");
  } else if (_parms._distribution == DistributionFamily.poisson) {
    if (_response.min() < 0)
      error("_response", "Response must be non-negative for Poisson distribution.");
  } else if (_parms._distribution == DistributionFamily.gamma) {
    if (_response.min() < 0)
      error("_response", "Response must be non-negative for Gamma distribution.");
  } else if (_parms._distribution == DistributionFamily.tweedie) {
    if (_parms._tweedie_power >= 2 || _parms._tweedie_power <= 1)
      error("_tweedie_power", "Tweedie power must be between 1 and 2.");
    if (_response.min() < 0)
      error("_response", "Response must be non-negative for Tweedie distribution.");
  } else if (_parms._distribution == DistributionFamily.quantile) {
    if (_parms._quantile_alpha > 1 || _parms._quantile_alpha < 0)
      error("_quantile_alpha", "Quantile alpha must be between 0 and 1.");
  } else if (_parms._distribution == DistributionFamily.huber) {
    if (_parms._huber_alpha <0 || _parms._huber_alpha>1)
      error("_huber_alpha", "Huber alpha must be between 0 and 1.");
  }
}

transient public HashSet<String> _removedCols = new HashSet<>(); // names of columns dropped by the last FilterCols.doIt

// Template for dropping columns matching a predicate, skipping the trailing special vecs.
public abstract class FilterCols {
  final int _specialVecs; // special vecs to skip at the end
  public FilterCols(int n) {_specialVecs = n;}

  abstract protected boolean filter(Vec v); // true ==> drop this column

  public void doIt( Frame f, String msg, boolean expensive ) {
    List<Integer> rmcolsList = new ArrayList<>();
    for( int i = 0; i < f.vecs().length - _specialVecs; i++ )
      if( filter(f.vec(i)) ) rmcolsList.add(i);
    if( !rmcolsList.isEmpty() ) {
      _removedCols = new HashSet<>(rmcolsList.size());
      int[] rmcols = new int[rmcolsList.size()];
      for (int i=0;i<rmcols.length;++i) {
        rmcols[i]=rmcolsList.get(i);
        _removedCols.add(f._names[rmcols[i]]);
      }
      f.remove(rmcols); //bulk-remove
      msg += _removedCols.toString();
      warn("_train", msg);
      if (expensive) Log.info(msg);
    }
  }
}

//stitch together holdout predictions into one large Frame
private static Frame combineHoldoutPredictions(Key<Frame>[] predKeys, Key key) {
  int N = predKeys.length;
  Frame template = predKeys[0].get();
  Vec[] vecs = new Vec[N*template.numCols()];
  int idx=0;
  for (int i=0;i<N;++i)
    for (int j=0;j<predKeys[i].get().numCols();++j)
      vecs[idx++]=predKeys[i].get().vec(j);
  return new HoldoutPredictionCombiner(N,template.numCols()).doAll(template.types(),new Frame(vecs)).outputFrame(key, template.names(),template.domains());
}

// helper to combine multiple holdout prediction Vecs (each only has 1/N-th filled with non-zeros) into 1 Vec
private static class HoldoutPredictionCombiner extends MRTask<HoldoutPredictionCombiner> {
  int _folds, _cols;
  public HoldoutPredictionCombiner(int folds, int cols) { _folds=folds; _cols=cols; }

  @Override public void map(Chunk[] cs, NewChunk[] nc) {
    for (int c=0;c<_cols;++c) {
      // Sum the per-fold columns; only one fold contributes a non-zero value per row.
      double [] vals = new double[cs[0].len()];
      for (int f=0;f<_folds;++f)
        for (int row = 0; row < cs[0].len(); ++row)
          vals[row] += cs[f * _cols + c].atd(row);
      nc[c].setDoubles(vals);
    }
  }
}

// Builds the "Cross-Validation Metrics Summary" table: one row per metric (discovered
// reflectively from the metrics/confusion-matrix objects), columns mean/sd plus one per CV model.
private TwoDimTable makeCrossValidationSummaryTable(Key[] cvmodels) {
  if (cvmodels == null || cvmodels.length == 0) return null;
  int N = cvmodels.length;
  int extra_length=2; //mean/sigma/cv1/cv2/.../cvN
  String[] colTypes = new String[N+extra_length];
  Arrays.fill(colTypes, "string");
  String[] colFormats = new String[N+extra_length];
  Arrays.fill(colFormats, "%s");
  String[] colNames = new String[N+extra_length];
  colNames[0] = "mean";
  colNames[1] = "sd";
  for (int i=0;i<N;++i) colNames[i+extra_length] = "cv_" + (i+1) + "_valid";
  // Method names that are not metrics and must not be invoked reflectively.
  Set<String> excluded = new HashSet<>();
  excluded.add("total_rows");
  excluded.add("makeSchema");
  excluded.add("hr");
  excluded.add("frame");
  excluded.add("model");
  excluded.add("remove");
  excluded.add("cm");
  excluded.add("auc_obj");
  // Discover candidate metric methods: any no-arg method returning double on the metrics
  // object (and its confusion matrix), probed on the first CV model.
  List<Method> methods = new ArrayList<>();
  {
    Model m = DKV.getGet(cvmodels[0]);
    ModelMetrics mm = m._output._validation_metrics;
    if (mm != null) {
      for (Method meth : mm.getClass().getMethods()) {
        if (excluded.contains(meth.getName())) continue;
        try {
          double c = (double) meth.invoke(mm);
          methods.add(meth);
        } catch (Exception ignored) {} // not a double-returning metric accessor
      }
      ConfusionMatrix cm = mm.cm();
      if (cm != null) {
        for (Method meth : cm.getClass().getMethods()) {
          if (excluded.contains(meth.getName())) continue;
          try {
            double c = (double) meth.invoke(cm);
            methods.add(meth);
          } catch (Exception ignored) {}
        }
      }
    }
  }
  // make unique, and sort alphabetically
  Set<String> rowNames=new TreeSet<>();
  for (Method m : methods) rowNames.add(m.getName());
  List<Method> meths = new ArrayList<>();
  OUTER:
  for (String n : rowNames)
    for (Method m : methods)
      if (m.getName().equals(n)) { //find the first method that has that name
        meths.add(m);
        continue OUTER;
      }
  int numMetrics = rowNames.size();
  TwoDimTable table = new TwoDimTable("Cross-Validation Metrics Summary", null, rowNames.toArray(new String[0]), colNames, colTypes, colFormats, "");
  MathUtils.BasicStats stats = new MathUtils.BasicStats(numMetrics);
  double[][] vals = new double[N][numMetrics];
  int i = 0;
  for (Key<Model> km : cvmodels) {
    Model m = DKV.getGet(km);
    if (m==null) continue;
    ModelMetrics mm = m._output._validation_metrics;
    int j=0;
    for (Method meth : meths) {
      if (excluded.contains(meth.getName())) continue;
      try {
        double val = (double) meth.invoke(mm);
        vals[i][j] = val;
        table.set(j++, i+extra_length, (float)val);
      } catch (Throwable e) { } // metric not applicable to this model; cell stays empty
      if (mm.cm()==null) continue;
      try {
        double val = (double) meth.invoke(mm.cm());
        vals[i][j] = val;
        table.set(j++, i+extra_length, (float)val);
      } catch (Throwable e) { }
    }
    i++;
  }
  for (i=0;i<N;++i) stats.add(vals[i],1);
  for (i=0;i<numMetrics;++i) {
    table.set(i, 0, (float)stats.mean()[i]);
    table.set(i, 1, (float)stats.sigma()[i]);
  }
  Log.info(table);
  return table;
}
}
package model;

import java.util.Collections;
import java.util.Scanner;
import java.util.Stack;

/**
 * A deck of cards with a fixed maximum capacity, backed by a stack.
 * The top of the stack is the next card to be drawn.
 */
public class Deck {

	protected int max_cards;    // maximum number of cards this deck may hold
	protected Stack<Card> deck; // card storage; top of stack = next card drawn

	//Constructors initialize the deck and set its maximum number of cards.

	/** Creates an empty deck with the standard capacity of 52 cards. */
	public Deck(){
		this(52);
	}

	/** Creates an empty deck holding at most {@code max} cards. */
	public Deck(int max){
		this.max_cards = max;
		this.deck = new Stack<Card>();
		this.deck.ensureCapacity(max_cards);
	}

	/**
	 * Wraps the given deck's card stack.
	 * NOTE(review): this shares (aliases) the underlying stack with {@code newDeck}
	 * rather than copying it — kept for compatibility, but confirm callers expect
	 * both decks to see each other's changes.
	 */
	public Deck(Deck newDeck){
		this.deck = newDeck.deck;
		this.max_cards = deck.capacity();
	}

	/** Wraps (does not copy) the given stack of cards. */
	public Deck(Stack<Card> newDeck){
		this.deck = newDeck;
		this.max_cards = deck.capacity();
	}

	/** Replaces the contents with a fresh, ordered set of 4 suits x 13 faces. */
	public void newDeck(){
		deck.clear();
		for(int suit = 1; suit <= 4; suit++)
			for(int face = 1; face <= 13; face++){
				addCard(new PlayingCard(face,suit));
			}
	}

	/** Same as {@link #newDeck()}, with each card's flipped state set to {@code flipped}. */
	public void newDeck(boolean flipped){
		deck.clear();
		for(int suit = 1; suit <= 4; suit++)
			for(int face = 1; face <= 13; face++){
				addCard(new PlayingCard(face,suit,flipped));
			}
	}

	/**
	 * Adds a card to the top of the deck.
	 * @return true if added, false if the deck is already at capacity
	 */
	public boolean addCard(Card card){
		if(deck.size() < max_cards)
			return deck.add(card);
		else
			return false;
	}

	//Removes the card from the deck and passes it.
	public Card drawCard(){
		if(empty())
			return null;
		// Fix: pop() removes the top element itself. The previous
		// remove(lastElement()) removed the FIRST card equal to the top card,
		// which silently removed the wrong card when the deck held duplicates.
		return deck.pop();
	}

	//Passes the top card without removing it from the deck.
	public Card viewNext(){
		return empty() ? null : deck.peek();
	}

	//Shuffles the cards in the deck.
	public void shuffle(){
		// Fix: Collections.shuffle performs an unbiased Fisher-Yates shuffle;
		// the old "move a random card to the top" loop produced a biased ordering.
		Collections.shuffle(deck);
	}

	//Shuffles the cards multiple times.
	public void shuffle(int shuffles){
		while(shuffles > 0){
			shuffle();
			shuffles--;
		}
	}

	/** @return true if the deck holds no cards */
	public boolean empty(){
		return deck.empty();
	}

	/**
	 * Serializes the deck as comma-separated fields:
	 * {@code [Deck],<max_cards>,<card>...,[/Deck]} (bottom card first).
	 */
	public String save(){
		StringBuilder result = new StringBuilder("[Deck]");
		result.append(",").append(max_cards);
		for(int index = 0; index < deck.size(); index++){
			result.append(",").append(deck.elementAt(index).save());
		}
		result.append(",[/Deck]");
		return result.toString();
	}

	/** Restores the deck from a string produced by {@link #save()}. */
	public void load(String save){
		Scanner scan = new Scanner(save);
		scan.useDelimiter(",");
		deck.clear();
		if(scan.next().equals("[Deck]")){
			this.max_cards = scan.nextInt();
			this.deck.ensureCapacity(max_cards);
			String next = scan.next();
			// Fix: stop at the closing tag. The old do/while tried to load
			// "[/Deck]" as a card when the saved deck was empty.
			while(!next.equals("[/Deck]")){
				PlayingCard card = new PlayingCard();
				card.load(next);
				addCard(card);
				if(!scan.hasNext()) break;
				next = scan.next();
			}
		}
		scan.close();
	}

	/** @return "size/capacity", e.g. {@code "3/52"} */
	public String toString(){
		String result = "";
		result += deck.size() + "/" + max_cards;
		//result += "\n" + deck;
		return result;
	}
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.admin.jmx.internal;

import java.util.concurrent.atomic.AtomicInteger;

import javax.management.MBeanException;
import javax.management.MalformedObjectNameException;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import javax.management.RuntimeOperationsException;
import javax.naming.OperationNotSupportedException;

import org.apache.commons.modeler.ManagedBean;
import org.apache.logging.log4j.Logger;

import org.apache.geode.SystemFailure;
import org.apache.geode.admin.AdminException;
import org.apache.geode.admin.ConfigurationParameter;
import org.apache.geode.admin.OperationCancelledException;
import org.apache.geode.admin.StatisticResource;
import org.apache.geode.admin.SystemMember;
import org.apache.geode.admin.SystemMemberCache;
import org.apache.geode.admin.SystemMemberCacheEvent;
import org.apache.geode.admin.SystemMemberRegionEvent;
import org.apache.geode.annotations.internal.MakeNotStatic;
import org.apache.geode.cache.Operation;
import org.apache.geode.internal.admin.ClientMembershipMessage;
import org.apache.geode.logging.internal.log4j.api.LogService;
import org.apache.geode.util.internal.GeodeGlossary;

/**
 * Defines methods that all <code>SystemMember</code> MBeans should implement.
 *
 * @since GemFire 4.0
 */
public interface SystemMemberJmx extends SystemMember, NotificationListener {
  /**
   * Notification type for indicating a cache got created on a member of this distributed system.
   */
  String NOTIF_CACHE_CREATED = GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.created";

  /**
   * Notification type for indicating a cache is closed on a member of this distributed system.
   */
  String NOTIF_CACHE_CLOSED = GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.closed";

  /**
   * Notification type for indicating a region is created in a cache on a member of this distributed
   * system.
   */
  String NOTIF_REGION_CREATED =
      GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.region.created";

  /**
   * Notification type for indicating a region was removed from a cache on a member of this
   * distributed system.
   */
  String NOTIF_REGION_LOST = GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.region.lost";

  /** Notification type for indicating client joined */
  String NOTIF_CLIENT_JOINED =
      GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.client.joined";

  /** Notification type for indicating client left */
  String NOTIF_CLIENT_LEFT = GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.client.left";

  /** Notification type for indicating client crashed */
  String NOTIF_CLIENT_CRASHED =
      GeodeGlossary.GEMFIRE_PREFIX + "distributedsystem.cache.client.crashed";

  /**
   * Gets the interval in seconds between config refreshes
   *
   * @return the current refresh interval in seconds
   */
  int getRefreshInterval();

  /**
   * RefreshInterval is now set only through the AdminDistributedSystem property refreshInterval.
   * Attempt to set refreshInterval on SystemMemberJmx MBean would result in an
   * OperationNotSupportedException Auto-refresh is enabled on demand when a call to refreshConfig
   * is made
   *
   * @param refreshInterval the new refresh interval in seconds
   * @throws OperationNotSupportedException always; the interval is no longer settable here
   * @deprecated since 6.0 use DistributedSystemConfig.refreshInterval instead
   */
  @Deprecated
  void setRefreshInterval(int refreshInterval) throws OperationNotSupportedException;

  /**
   * Sets the refresh interval field. Sets interval in seconds between config refreshes; zero or
   * less turns off auto refreshing. Manual refreshing has no effect on when the next scheduled
   * refresh will occur.
   *
   * @param refreshInterval the new refresh interval in seconds
   */
  void _setRefreshInterval(int refreshInterval);

  /**
   * Gets this member's cache.
   *
   * @return <code>ObjectName</code> for this member's cache
   *
   * @throws AdminException If this system member does not host a cache
   * @throws MalformedObjectNameException if the cache MBean name is invalid
   */
  ObjectName manageCache() throws AdminException, MalformedObjectNameException;

  /**
   * Gets all active StatisticResources for this manager.
   *
   * @return array of ObjectName instances
   *
   * @throws AdminException if the statistic resources cannot be retrieved
   * @throws MalformedObjectNameException if a statistics MBean name is invalid
   */
  ObjectName[] manageStats() throws AdminException, MalformedObjectNameException;

  /**
   * Gets the active StatisticResources for this manager, based on the typeName as the key
   *
   * @param statisticsTypeName type name used to select the statistics
   * @return ObjectName of StatisticResourceJMX instance
   *
   * @throws AdminException if the statistic resources cannot be retrieved
   * @throws MalformedObjectNameException if a statistics MBean name is invalid
   */
  ObjectName[] manageStat(String statisticsTypeName)
      throws AdminException, MalformedObjectNameException;

  /**
   * Handles notification to refresh. Reacts by refreshing the values of this GemFireManager's
   * ConfigurationParameters. Any other notification is ignored.
   *
   * @param notification the JMX notification being received
   * @param hb handback object is unused
   */
  @Override
  void handleNotification(Notification notification, Object hb);

  /**
   * Add MBean attribute definitions for each ConfigurationParameter.
   *
   * @param managed the mbean definition to add attributes to
   * @return a new instance of ManagedBean copied from <code>managed</code> but with the new
   *         attributes added
   * @throws AdminException if the member's configuration cannot be refreshed
   */
  ManagedBean addDynamicAttributes(ManagedBean managed) throws AdminException;

  /**
   * Implementation should handle creation of cache by extracting the details from the given event
   * object.
   *
   * @param event event object corresponding to the creation of the cache
   */
  void handleCacheCreate(SystemMemberCacheEvent event);

  /**
   * Implementation should handle closure of cache by extracting the details from the given event
   * object.
   *
   * @param event event object corresponding to the closure of the cache
   */
  void handleCacheClose(SystemMemberCacheEvent event);

  /**
   * Implementation should handle creation of region by extracting the details from the given event
   * object.
   *
   * @param event event object corresponding to the creation of a region
   */
  void handleRegionCreate(SystemMemberRegionEvent event);

  /**
   * Implementation should handle loss of region by extracting the details from the given event
   * object.
   *
   * @param event event object corresponding to the loss of a region
   */
  void handleRegionLoss(SystemMemberRegionEvent event);

  /**
   * Implementation should handle client membership changes.
   *
   * @param clientId id of the client for whom membership change happened
   * @param eventType membership change type; one of {@link ClientMembershipMessage#JOINED},
   *        {@link ClientMembershipMessage#LEFT}, {@link ClientMembershipMessage#CRASHED}
   */
  void handleClientMembership(String clientId, int eventType);

  ////////////////////// Inner Classes //////////////////////

  /**
   * A helper class that provides implementation of the <code>SystemMemberJmx</code> interface as
   * static methods.
   */
  class Helper {
    private static final Logger logger = LogService.getLogger();

    @MakeNotStatic
    private static final AtomicInteger notificationSequenceNumber = new AtomicInteger();

    /**
     * Registers a config-refresh notification for the member and returns the interval actually in
     * effect — zero when registration failed.
     *
     * @param member the member MBean to register
     * @param refreshInterval requested interval in seconds
     * @return the effective refresh interval (0 on failure)
     */
    public static int setAndReturnRefreshInterval(SystemMemberJmx member, int refreshInterval) {
      int ret = refreshInterval;

      try {
        MBeanUtils.registerRefreshNotification(member, // NotificationListener
            ((ManagedResource) member).getMBeanName(), // User Data
            RefreshNotificationType.SYSTEM_MEMBER_CONFIG, refreshInterval); // int

      } catch (RuntimeException e) {
        logger.warn(e.getMessage(), e); // dead in water, print, and then ignore
        ret = 0; // zero out to avoid more exceptions

      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;

      } catch (Error e) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        logger.error(e.getMessage(), e); // dead in water, print, and then ignore
        ret = 0; // zero out to avoid more exceptions
      }

      return ret;
    }

    /**
     * Returns the ObjectName of the member's cache MBean.
     *
     * @param member the member whose cache is managed
     * @return the cache MBean's ObjectName
     * @throws AdminException if the member hosts no cache
     * @throws MalformedObjectNameException if the MBean name is invalid
     */
    public static ObjectName manageCache(SystemMemberJmx member)
        throws AdminException, MalformedObjectNameException {
      // Tracks whether the AdminException originated here (expected "no cache"
      // case) so we only log exceptions raised by the calls below.
      boolean threwAdminException = false;
      try {
        SystemMemberCache cache = member.getCache();
        if (cache == null) {
          threwAdminException = true;
          throw new AdminException(
              "This System Member does not have a Cache.");
        }
        SystemMemberCacheJmxImpl cacheJmx = (SystemMemberCacheJmxImpl) cache;
        return ObjectName.getInstance(cacheJmx.getMBeanName());
      } catch (AdminException e) {
        if (!threwAdminException) {
          logger.warn(e.getMessage(), e);
        }
        throw e;
      } catch (RuntimeException e) {
        logger.warn(e.getMessage(), e);
        throw e;
      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Error e) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        logger.error(e.getMessage(), e);
        throw e;
      }
    }

    /**
     * Returns the ObjectNames of all of the member's statistic resource MBeans.
     *
     * @param member the member whose statistics are managed
     * @return one ObjectName per statistic resource
     * @throws AdminException if the statistics cannot be retrieved
     * @throws MalformedObjectNameException if an MBean name is invalid
     */
    public static ObjectName[] manageStats(SystemMemberJmx member)
        throws AdminException, MalformedObjectNameException {
      try {
        StatisticResource[] stats = member.getStats();
        ObjectName[] onames = new ObjectName[stats.length];
        for (int i = 0; i < stats.length; i++) {
          StatisticResourceJmxImpl stat = (StatisticResourceJmxImpl) stats[i];
          onames[i] = ObjectName.getInstance(stat.getMBeanName());
        }
        return onames;
      } catch (AdminException e) {
        logger.warn(e.getMessage(), e);
        throw e;
      } catch (RuntimeException e) {
        logger.warn(e.getMessage(), e);
        throw e;
      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Error e) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        logger.error(e.getMessage(), e);
        throw e;
      }
    }

    /**
     * Returns the ObjectNames of the member's statistic resources matching the given type name, or
     * null when none match.
     *
     * @param member the member whose statistics are managed
     * @param statisticsTypeName type name used to select the statistics
     * @return matching ObjectNames, or null
     * @throws AdminException if the statistics cannot be retrieved
     * @throws MalformedObjectNameException if an MBean name is invalid
     */
    public static ObjectName[] manageStat(SystemMemberJmx member, String statisticsTypeName)
        throws AdminException, MalformedObjectNameException {
      try {
        StatisticResource[] stats = member.getStat(statisticsTypeName);
        if (stats == null) {
          return null;
        }
        ObjectName[] statNames = new ObjectName[stats.length];
        for (int i = 0; i < stats.length; i++) {
          StatisticResourceJmxImpl statJMX = (StatisticResourceJmxImpl) stats[i];
          statNames[i] = ObjectName.getInstance(statJMX.getMBeanName());
        }
        return statNames;
      } catch (AdminException e) {
        logger.warn(e.getMessage(), e);
        throw e;
      } catch (RuntimeException e) {
        logger.warn(e.getMessage(), e);
        throw e;
      } catch (VirtualMachineError err) {
        // Fix: this method previously caught Error without the
        // VirtualMachineError / SystemFailure guard that every sibling
        // method in this class applies.
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Error e) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        logger.error(e.getMessage(), e);
        throw e;
      }
    }

    /**
     * Reacts to a SYSTEM_MEMBER_CONFIG refresh notification addressed to this member by refreshing
     * its configuration; any other notification is ignored. On failure the auto-refresh interval
     * is zeroed to stop further attempts.
     *
     * @param member the member to refresh
     * @param notification the received JMX notification
     * @param hb handback object is unused
     */
    public static void handleNotification(SystemMemberJmx member, Notification notification,
        Object hb) {
      if (RefreshNotificationType.SYSTEM_MEMBER_CONFIG.getType().equals(notification.getType())
          && ((ManagedResource) member).getMBeanName().equals(notification.getUserData())) {

        try {
          member.refreshConfig();

        } catch (org.apache.geode.admin.AdminException e) {
          logger.warn(e.getMessage(), e);
        } catch (OperationCancelledException e) {
          // underlying resource is no longer reachable by remote admin
          logger.warn(e.getMessage(), e);
          member._setRefreshInterval(0);

        } catch (java.lang.RuntimeException e) {
          logger.warn(e.getMessage(), e); // dead in water, print, and then ignore
          member._setRefreshInterval(0); // zero out to avoid more exceptions

        } catch (VirtualMachineError err) {
          SystemFailure.initiateFailure(err);
          // If this ever returns, rethrow the error. We're poisoned
          // now, so don't let this thread continue.
          throw err;

        } catch (java.lang.Error e) {
          // Whenever you catch Error or Throwable, you must also
          // catch VirtualMachineError (see above). However, there is
          // _still_ a possibility that you are dealing with a cascading
          // error condition, so you also need to check to see if the JVM
          // is still usable:
          SystemFailure.checkFailure();
          logger.error(e.getMessage(), e); // dead in water, print, and then ignore
          member._setRefreshInterval(0); // zero out to avoid more exceptions
        }
      }
    }

    /**
     * Builds a copy of <code>managed</code> with one MBean attribute added per configuration
     * parameter of the member.
     *
     * @param member the member whose configuration is exposed
     * @param managed the mbean definition to copy and extend
     * @return a new ManagedBean with the dynamic attributes added
     * @throws AdminException if the member's configuration cannot be refreshed
     */
    public static ManagedBean addDynamicAttributes(SystemMemberJmx member, ManagedBean managed)
        throws AdminException {

      if (managed == null) {
        throw new IllegalArgumentException(
            "ManagedBean is null");
      }

      member.refreshConfig(); // to get the config parms...

      // need to create a new instance of ManagedBean to clean the "slate"...
      ManagedBean newManagedBean = new DynamicManagedBean(managed);
      ConfigurationParameter[] params = member.getConfiguration();
      for (int i = 0; i < params.length; i++) {
        ConfigurationParameterJmxImpl parm = (ConfigurationParameterJmxImpl) params[i];
        ConfigAttributeInfo attrInfo = new ConfigAttributeInfo(parm);

        attrInfo.setName(parm.getName());
        attrInfo.setDisplayName(parm.getName());
        attrInfo.setDescription(parm.getDescription());
        attrInfo.setType(parm.getJmxValueType().getName());

        attrInfo.setIs(false);
        attrInfo.setReadable(true);
        attrInfo.setWriteable(parm.isModifiable());

        newManagedBean.addAttribute(attrInfo);
      }

      return newManagedBean;
    }

    /**
     * Returns the next notification sequence number.
     *
     * @return the notificationSequenceNumber
     */
    /* default */static int getNextNotificationSequenceNumber() {
      return notificationSequenceNumber.incrementAndGet();
    }

    /**
     * Returns the cache event details extracted from the given SystemMemberCacheEvent
     *
     * @param event SystemMemberCacheEvent instance
     * @return the cache event details extracted from the given SystemMemberCacheEvent
     */
    /* default */static String getCacheEventDetails(SystemMemberCacheEvent event) {
      String memberId = event.getMemberId();
      Operation operation = event.getOperation();

      return "CacheEvent[MemberId: " + memberId + ", operation: " + operation + "]";
    }

    /**
     * Returns the region event details extracted from the given SystemMemberRegionEvent
     *
     * @param event SystemMemberRegionEvent instance
     * @return the cache event details extracted from the given SystemMemberRegionEvent
     */
    /* default */static String getRegionEventDetails(SystemMemberRegionEvent event) {
      String memberId = event.getMemberId();
      Operation operation = event.getOperation();

      return "RegionEvent[MemberId: " + memberId + ", operation: " + operation + ", region:"
          + event.getRegionPath() + "]";
    }

    /**
     * Sends the given notification.
     *
     * @param resource resource whose ModelMBean emits the notification
     * @param notif notification to send
     *
     * @throws NullPointerException if resource or ModelMBean for resource is null
     */
    /* default */static void sendNotification(ManagedResource resource, Notification notif) {
      try {
        if (MBeanUtils.isRegistered(resource.getObjectName())) {
          resource.getModelMBean().sendNotification(notif);
          if (logger.isDebugEnabled()) {
            logger.debug("Sent '{}' notification", notif.getType());
          }
        }
      } catch (RuntimeOperationsException | MBeanException e) {
        // Fix: the two catch blocks were identical — collapsed into a multi-catch.
        logger
            .info(String.format("Failed to send %s notification for %s",
                new Object[] {"'" + notif.getType() + "'", "'" + notif.getMessage() + "'"}), e);
      }
    }
  }
}
package net.minecraft.client.renderer.chunk;

import java.util.ArrayList;
import java.util.List;
import net.minecraft.client.renderer.WorldRenderer;
import net.minecraft.tileentity.TileEntity;
import net.minecraft.util.EnumFacing;
import net.minecraft.util.EnumWorldBlockLayer;

/**
 * The result of compiling a render chunk: which block render layers contain
 * geometry, which have started rendering, the tile entities found inside the
 * chunk, inter-face visibility data, and the captured vertex-buffer state.
 */
public class CompiledChunk {

    /** One flag slot per block render layer. */
    private static final int LAYER_COUNT = EnumWorldBlockLayer.values().length;

    /**
     * Shared immutable placeholder used before a chunk is compiled: rejects
     * mutation and reports no face pair as visible.
     */
    public static final CompiledChunk DUMMY = new CompiledChunk() {
        @Override
        protected void setLayerUsed(EnumWorldBlockLayer layer) {
            throw new UnsupportedOperationException();
        }

        @Override
        public void setLayerStarted(EnumWorldBlockLayer layer) {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean isVisible(EnumFacing facing, EnumFacing facing2) {
            return false;
        }
    };

    // Per-layer flags, indexed by EnumWorldBlockLayer.ordinal().
    private final boolean[] usedLayers = new boolean[LAYER_COUNT];
    private final boolean[] startedLayers = new boolean[LAYER_COUNT];
    // True until the first layer is marked used.
    private boolean noGeometry = true;
    private final List<TileEntity> chunkTileEntities = new ArrayList<>();
    private SetVisibility visibilityData = new SetVisibility();
    private WorldRenderer.State capturedState;

    /** @return true while no layer has been marked as used */
    public boolean isEmpty() {
        return noGeometry;
    }

    /** Marks a layer as containing geometry; the chunk is no longer empty. */
    protected void setLayerUsed(EnumWorldBlockLayer layer) {
        noGeometry = false;
        usedLayers[layer.ordinal()] = true;
    }

    /** @return true if the given layer holds no geometry */
    public boolean isLayerEmpty(EnumWorldBlockLayer layer) {
        return !usedLayers[layer.ordinal()];
    }

    /** Records that rendering of the given layer has begun. */
    public void setLayerStarted(EnumWorldBlockLayer layer) {
        startedLayers[layer.ordinal()] = true;
    }

    /** @return true if rendering of the given layer has begun */
    public boolean isLayerStarted(EnumWorldBlockLayer layer) {
        return startedLayers[layer.ordinal()];
    }

    /** @return the tile entities collected while compiling this chunk */
    public List<TileEntity> getTileEntities() {
        return chunkTileEntities;
    }

    /** Registers a tile entity found while compiling this chunk. */
    public void addTileEntity(TileEntity tileEntityIn) {
        chunkTileEntities.add(tileEntityIn);
    }

    /** @return whether one chunk face can be seen through from the other */
    public boolean isVisible(EnumFacing facing, EnumFacing facing2) {
        return visibilityData.isVisible(facing, facing2);
    }

    /** Replaces the face-to-face visibility data for this chunk. */
    public void setVisibility(SetVisibility visibility) {
        visibilityData = visibility;
    }

    /** @return the captured vertex-buffer state, or null if none was set */
    public WorldRenderer.State getState() {
        return capturedState;
    }

    /** Stores the captured vertex-buffer state. */
    public void setState(WorldRenderer.State stateIn) {
        capturedState = stateIn;
    }
}
package beginner;

import com.sandwich.koan.Koan;

import java.text.MessageFormat;

import static com.sandwich.koan.constant.KoanConstants.__;
import static com.sandwich.util.Assert.assertEquals;
import static com.sandwich.util.Assert.fail;

/**
 * Koan exercises about {@link String}: construction, emptiness, comparison,
 * substrings, and formatting. Each koan is a self-checking assertion; this
 * copy appears to have the answers filled in already (many assertions compare
 * an expression against itself or against its literal result).
 */
public class AboutStrings {

    // A string literal is already a java.lang.String instance.
    @Koan
    public void implicitStrings() {
        assertEquals("just a plain ole string".getClass(), "just a plain ole string".getClass());
    }

    @Koan
    public void newString() {
        // very rarely if ever should Strings be created via new String() in
        // practice - generally it is redundant, and done repetitively can be slow
        String string = new String();
        String empty = "";
        assertEquals(string.equals(empty), string.equals(empty));
    }

    // new String(s) copies s: equal content, but a distinct object.
    @Koan
    public void newStringIsRedundant() {
        String stringInstance = "zero";
        String stringReference = new String(stringInstance);
        assertEquals(stringInstance.equals(stringReference), stringInstance.equals(stringReference));
    }

    // == compares references, so a copied String is never == its source.
    @Koan
    public void newStringIsNotIdentical() {
        String stringInstance = "zero";
        String stringReference = new String(stringInstance);
        assertEquals(stringInstance == stringReference, stringInstance == stringReference);
    }

    // isEmpty() is true only for length-0 strings, however they were built.
    @Koan
    public void stringIsEmpty() {
        assertEquals("".isEmpty(), "".isEmpty());
        assertEquals("one".isEmpty(), "one".isEmpty());
        assertEquals(new String().isEmpty(), new String().isEmpty());
        assertEquals(new String("").isEmpty(), new String("").isEmpty());
        assertEquals(new String("one").isEmpty(), new String("one").isEmpty());
    }

    // length() counts chars, including spaces.
    @Koan
    public void stringLength() {
        assertEquals("".length(), 0);
        assertEquals("one".length(), 3);
        assertEquals("the number is one".length(), 17);
    }

    // trim() strips leading and trailing whitespace, including tabs.
    @Koan
    public void stringTrim() {
        assertEquals("".trim(), "");
        assertEquals("one".trim(), "one");
        assertEquals(" one more time".trim(), "one more time");
        assertEquals(" one more time ".trim(), "one more time");
        assertEquals(" and again\t".trim(), "and again");
        assertEquals("\t\t\twhat about now?\t".trim(), "what about now?");
    }

    // + concatenates in left-to-right order; nothing is inserted between operands.
    @Koan
    public void stringConcatenation() {
        String one = "one";
        String space = " ";
        String two = "two";
        assertEquals(one + space + two, "one two");
        assertEquals(space + one + two, " onetwo");
        assertEquals(two + space + one, "two one");
    }

    @Koan
    public void stringUpperCase() {
        String str = "I am a number one!";
        assertEquals(str.toUpperCase(), "I AM A NUMBER ONE!");
    }

    @Koan
    public void stringLowerCase() {
        String str = "I AM a number ONE!";
        assertEquals(str.toLowerCase(), "i am a number one!");
    }

    // compareTo is case-sensitive: only an exact match yields 0.
    @Koan
    public void stringCompare() {
        String str = "I AM a number ONE!";
        assertEquals(str.compareTo("I AM a number ONE!") == 0, true);
        assertEquals(str.compareTo("I am a number one!") == 0, false);
        assertEquals(str.compareTo("I AM A NUMBER ONE!") == 0, false);
    }

    // compareToIgnoreCase treats all casings of the same letters as equal.
    @Koan
    public void stringCompareIgnoreCase() {
        String str = "I AM a number ONE!";
        assertEquals(str.compareToIgnoreCase("I AM a number ONE!") == 0, true);
        assertEquals(str.compareToIgnoreCase("I am a number one!") == 0, true);
        assertEquals(str.compareToIgnoreCase("I AM A NUMBER ONE!") == 0, true);
    }

    // startsWith is case-sensitive and false on the empty string for a non-empty prefix.
    @Koan
    public void stringStartsWith() {
        assertEquals("".startsWith("one"), false);
        assertEquals("one".startsWith("one"), true);
        assertEquals("one is the number".startsWith("one"), true);
        assertEquals("ONE is the number".startsWith("one"), false);
    }

    // endsWith mirrors startsWith: case-sensitive suffix check.
    @Koan
    public void stringEndsWith() {
        assertEquals("".endsWith("one"), false);
        assertEquals("one".endsWith("one"), true);
        assertEquals("the number is one".endsWith("one"), true);
        assertEquals("the number is two".endsWith("one"), false);
        assertEquals("the number is One".endsWith("one"), false);
    }

    // substring(begin) runs to the end; substring(begin, end) excludes index end.
    @Koan
    public void stringSubstring() {
        String str = "I AM a number ONE!";
        assertEquals(str.substring(0), "I AM a number ONE!");
        assertEquals(str.substring(1), " AM a number ONE!");
        assertEquals(str.substring(5), "a number ONE!");
        assertEquals(str.substring(14, 17), "ONE");
        assertEquals(str.substring(7, str.length()), "number ONE!");
    }

    // contains is a case-sensitive substring search.
    @Koan
    public void stringContains() {
        String str = "I AM a number ONE!";
        assertEquals(str.contains("one"), false);
        assertEquals(str.contains("ONE"), true);
    }

    // replace substitutes every occurrence and returns a new String.
    @Koan
    public void stringReplace() {
        String str = "I am a number ONE!";
        assertEquals(str.replace("ONE", "TWO"), "I am a number TWO!");
        assertEquals(str.replace("I am", "She is"), "She is a number ONE!");
    }

    @Koan
    public void stringBuilderCanActAsAMutableString() {
        assertEquals(new StringBuilder("one").append(" ").append("two").toString(), "one two");
    }

    // String.format replaces each %s with the corresponding argument, in order.
    @Koan
    public void readableStringFormattingWithStringFormat() {
        assertEquals(String.format("%s %s %s", "a", "b", "a"), "a b a");
    }

    @Koan
    public void extraArgumentsToStringFormatGetIgnored() {
        assertEquals(String.format("%s %s %s", "a", "b", "c", "d"), "a b c");
    }

    // Too few arguments for the format specifiers raises an unchecked exception.
    @Koan
    public void insufficientArgumentsToStringFormatCausesAnError() {
        try {
            String.format("%s %s %s", "a", "b");
            fail("No Exception was thrown!");
        } catch (Exception e) {
            assertEquals(e.getClass(), java.util.MissingFormatArgumentException.class);
            assertEquals(e.getMessage(), "Format specifier '%s'");
        }
    }

    // MessageFormat placeholders are indexed, so an argument can be reused.
    @Koan
    public void readableStringFormattingWithMessageFormat() {
        assertEquals(MessageFormat.format("{0} {1} {0}", "a", "b"), "a b a");
    }

    @Koan
    public void extraArgumentsToMessageFormatGetIgnored() {
        assertEquals(MessageFormat.format("{0} {1} {0}", "a", "b", "c"), "a b a");
    }

    // Unlike String.format, a missing MessageFormat argument leaves the token as-is.
    @Koan
    public void insufficientArgumentsToMessageFormatDoesNotReplaceTheToken() {
        assertEquals(MessageFormat.format("{0} {1} {0}", "a"), "a {1} a");
    }
}
/*
	Copyright 2017 Digital Learning Sciences (DLS) at the
	University Corporation for Atmospheric Research (UCAR),
	P.O. Box 3000, Boulder, CO 80307

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/
package edu.ucar.dls.vocab.tags;

import edu.ucar.dls.vocab.MetadataVocab;
import edu.ucar.dls.util.strings.StringUtil;
import javax.servlet.jsp.JspException;
import javax.servlet.jsp.tagext.TagSupport;
import javax.servlet.jsp.PageContext;

/**
 * Default tag handler for rendering controlled vocabularies. All other vocab
 * tags should extend this!
 *
 * @author Ryan Deardorff
 */
public class MetadataVocabTag extends TagSupport {
	/**
	 * Identifier of the app using the vocabulary (e.g. "dds" or "dcs");
	 * expanded in setupTag() to "system.interface.language".
	 */
	protected String system;
	/**
	 * Named UI within the system (e.g. "descr" or "banner"); spelled without
	 * the second 'e' because "interface" is a Java keyword.
	 */
	protected String interfce;
	/**
	 * Language/locale code for internationalization, e.g. "en-us".
	 */
	protected String language;
	/**
	 * Location within the vocabulary hierarchy whose values are rendered;
	 * spaces are converted to underscores in setupTag().
	 */
	protected String group;
	/**
	 * Vocabulary looked up from the page context under "MetadataVocab".
	 */
	protected MetadataVocab vocab;
	/**
	 * String helper used for the space-to-underscore replacement in group.
	 */
	protected StringUtil stringUtil = new StringUtil();

	/**
	 * System is the identifier of a particular app, i.e. "dds", or "dcs"
	 *
	 * @param system The new system value
	 */
	public void setSystem( String system ) {
		this.system = system;
	}

	/**
	 * Interface is a particular UI within a given system. All systems should
	 * define an interface named "descr" (description). If a different "view" is
	 * needed for a spot within the app, another interface (such as "banner") can
	 * be referenced, provided that the corresponding XML files are loaded
	 * successfully.
	 *
	 * @param interfce The new interface value
	 */
	public void setInterface( String interfce ) {
		this.interfce = interfce;
	}

	/**
	 * Language will allow for support of internationalization. "en-us" indicates
	 * the United States version of English.
	 *
	 * @param language The new language value
	 */
	public void setLanguage( String language ) {
		this.language = language;
	}

	/**
	 * Group is used to reference a particular spot WITHIN the vocabulary
	 * hierarchy. Setting to "resourceType" would cause a return of ALL values
	 * within the "Resource type" vocabulary, whereas "resourceType:Visual" would
	 * return only those values within the "Visual" sub-group.
	 *
	 * @param group The new group value
	 */
	public void setGroup( String group ) {
		this.group = group;
	}

	/**
	 * Get the vocab object from the page context and expand system to be a
	 * concatenation of system, interface, and language
	 *
	 * @param pageContext page context expected to hold a "MetadataVocab" attribute
	 * @exception JspException if no vocabulary is found in the page context
	 */
	public void setupTag( PageContext pageContext ) throws JspException {
		vocab = (MetadataVocab)pageContext.findAttribute( "MetadataVocab" );
		if ( vocab == null ) {
			throw new JspException( "Vocabulary not found" );
		}
		else {
			if ( group != null ) {
				// Group names use underscores internally in place of spaces
				group = stringUtil.replace( group, " ", "_", false );
			}
			else {
				group = "";
			}
			// NOTE: system is mutated here; subsequent calls would re-append.
			system = system + "." + interfce + "." + language;
		}
	}

	/**
	 * Releases tag state; this class keeps nothing beyond what the
	 * superclass resets.
	 */
	public void release() {
		super.release();
	}
}
package com.example.simpletodo;

import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.*;
import android.widget.*;
import android.view.*;

import org.apache.commons.io.FileUtils;

import java.io.*;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.*;

/**
 * Single-screen to-do list. Items are shown in a ListView, added via the
 * add button, removed with a long press, and persisted line-by-line to a
 * "todo.txt" file in internal storage.
 */
public class MainActivity extends AppCompatActivity {

    /** Single log tag for the whole activity (was inconsistently cased). */
    private static final String TAG = "MainActivity";

    ArrayList<String> items;
    ArrayAdapter<String> itemsAdapter;
    ListView itemList;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        readItems();
        itemsAdapter = new ArrayAdapter<String>(this, android.R.layout.simple_list_item_1, items);
        itemList = (ListView) findViewById(R.id.itemList);
        itemList.setAdapter(itemsAdapter);
        // Fix: this was never called, so long-press deletion never worked.
        setupListViewListener();
    }

    /**
     * Handler for the add button: appends the typed text as a new item,
     * clears the input, and persists the list.
     *
     * @param v the clicked button (unused)
     */
    public void onAddBtn(View v) {
        EditText itemName = (EditText) findViewById(R.id.itemName);
        String text = itemName.getText().toString();
        itemsAdapter.add(text);
        itemName.setText("");
        writeItems();
        Toast.makeText(getApplicationContext(), "item added to list", Toast.LENGTH_SHORT).show();
    }

    /** Installs a long-click listener that deletes the pressed item and persists. */
    private void setupListViewListener() {
        Log.i(TAG, "Setting up listener on list view");
        itemList.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {
            @Override
            public boolean onItemLongClick(AdapterView<?> parent, View view, int position, long id) {
                Log.i(TAG, "item will be removed from list: " + position);
                items.remove(position);
                itemsAdapter.notifyDataSetChanged();
                writeItems();
                return true; // consume the event
            }
        });
    }

    /** @return the backing persistence file in internal storage */
    private File getDataFile() {
        return new File(getFilesDir(), "todo.txt");
    }

    /** Loads items from disk; falls back to an empty list on I/O failure. */
    private void readItems() {
        try {
            items = new ArrayList<>(FileUtils.readLines(getDataFile(), Charset.defaultCharset()));
        } catch (IOException e) {
            Log.e(TAG, "Error reading file", e);
            items = new ArrayList<>();
        }
    }

    /** Persists the current items, one per line; failures are only logged. */
    private void writeItems() {
        try {
            FileUtils.writeLines(getDataFile(), items);
        } catch (IOException e) {
            Log.e(TAG, "Error writing file", e);
        }
    }
}
package com.synapticswarm.minijvm.opcode;

import com.synapticswarm.minijvm.MethodContext;
import com.synapticswarm.minijvm.MiniStack;
import com.synapticswarm.minijvm.model.MiniConstantPool;

/**
 * Straightforward JVM opcode implementations: return, integer arithmetic,
 * local-variable loads/stores, and small integer constants.
 *
 * Fix: all boxing now uses autoboxing / {@code Integer.valueOf} instead of
 * the deprecated {@code new Integer(...)} constructor (deprecated since
 * Java 9); valueOf also reuses the cached small-integer instances.
 */
public class SimpleOpCodes {

    /** {@code return}: void return — leaves stack and locals untouched. */
    public static class _return extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            return;
        }
    }

    /** {@code imul}: pops two ints and pushes their product. */
    public static class imul extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            Integer a = (Integer) stack.pop();
            Integer b = (Integer) stack.pop();
            stack.push(Integer.valueOf(a * b));
        }
    }

    /** {@code iadd}: pops two ints and pushes their sum. */
    public static class iadd extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            Integer a = (Integer) stack.pop();
            Integer b = (Integer) stack.pop();
            stack.push(Integer.valueOf(a + b));
        }
    }

    // Offset between the iload_<n> suffix and the variable slot index.
    // NOTE(review): kept at 0 from the original ("whoops i forgot if the args
    // are zero based or not") — confirm slot numbering and adjust here only.
    private static final int ARG_OFFSET = 0;

    /** {@code iload_0}: pushes local variable slot 0. */
    public static class iload_0 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(ctx.getVariable(ARG_OFFSET + 0));
        }
    }

    /** {@code iload_1}: pushes local variable slot 1. */
    public static class iload_1 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(ctx.getVariable(ARG_OFFSET + 1));
        }
    }

    /** {@code iload_2}: pushes local variable slot 2. */
    public static class iload_2 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(ctx.getVariable(ARG_OFFSET + 2));
        }
    }

    /** {@code iload_3}: pushes local variable slot 3. */
    public static class iload_3 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(ctx.getVariable(ARG_OFFSET + 3));
        }
    }

    /** {@code iconst_0}: pushes the int constant 0. */
    public static class iconst_0 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(Integer.valueOf(0));
        }
    }

    /** {@code iconst_1}: pushes the int constant 1. */
    public static class iconst_1 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(Integer.valueOf(1));
        }
    }

    /** {@code iconst_2}: pushes the int constant 2. */
    public static class iconst_2 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(Integer.valueOf(2));
        }
    }

    /** {@code iconst_3}: pushes the int constant 3. */
    public static class iconst_3 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            stack.push(Integer.valueOf(3));
        }
    }

    /** {@code istore_0}: pops the stack top into local variable slot 0. */
    public static class istore_0 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            ctx.putVariable(Integer.valueOf(0), stack.pop());
        }
    }

    /** {@code istore_1}: pops the stack top into local variable slot 1. */
    public static class istore_1 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            ctx.putVariable(Integer.valueOf(1), stack.pop());
        }
    }

    /** {@code istore_2}: pops the stack top into local variable slot 2. */
    public static class istore_2 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            ctx.putVariable(Integer.valueOf(2), stack.pop());
        }
    }

    /** {@code istore_3}: pops the stack top into local variable slot 3. */
    public static class istore_3 extends BaseOpCode {
        public void execute(MiniStack stack, MiniConstantPool constantPool,
                MethodContext ctx) {
            ctx.putVariable(Integer.valueOf(3), stack.pop());
        }
    }
}
// Author: Rodney Shaghoulian // Github: github.com/RodneyShag // HackerRank: hackerrank.com/RodneyShag /* * For your reference: * * DoublyLinkedListNode { * int data; * DoublyLinkedListNode next; * DoublyLinkedListNode prev; * } * */ // Time Complexity: O(n) // Space Complexity: O(1) static DoublyLinkedListNode sortedInsert(DoublyLinkedListNode head, int data) { /* Create Node to insert */ DoublyLinkedListNode newNode = new DoublyLinkedListNode(data); if (head == null) { // insert in empty list return newNode; } else if (data < head.data) { // insert in front of list newNode.next = head; head.prev = newNode; return newNode; } else { /* Walk list with 2 pointers (code is cleaner than using just 1 pointer) */ DoublyLinkedListNode n1 = null; DoublyLinkedListNode n2 = head; while (n2 != null && n2.data < data) { n1 = n2; n2 = n2.next; } if (n2 == null) { // insert at end of list n1.next = newNode; newNode.prev = n1; } else { // insert somewhere within the list n1.next = newNode; n2.prev = newNode; newNode.prev = n1; newNode.next = n2; } return head; } }
import java.util.Scanner;

/**
 * Reads two integers from standard input and prints their sum in the form
 * "SOMA = &lt;sum&gt;".
 */
public class Q1003 {
    public static void main(String[] args) {
        try (Scanner input = new Scanner(System.in)) {
            int first = input.nextInt();
            int second = input.nextInt();
            System.out.println("SOMA = " + (first + second));
        }
    }
}
package org.nesc.ec.bigdata.security;

import org.nesc.ec.bigdata.config.AuthConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.web.bind.annotation.RestController;

/**
 * Spring Security configuration: disables CSRF, permits the listed paths
 * without authentication, and redirects to the external OAuth server's
 * logout page on logout.
 *
 * @author rd87
 * @date 3/23/2019
 * @version 1.0
 */
// NOTE(review): @RestController on a security configuration class is unusual
// (a @Configuration-style annotation would be expected) — confirm intent.
@RestController
public class WebSecurityConfigurer extends WebSecurityConfigurerAdapter {

    // Supplies the external OAuth host used to build the logout redirect URL.
    @Autowired
    private AuthConfig authConfig;

    /**
     * Configures the HTTP security filter chain for all requests.
     *
     * @param http the {@link HttpSecurity} builder to configure
     * @throws Exception propagated from the Spring Security builder API
     */
    @Override
    protected void configure(HttpSecurity http) throws Exception {
        // @formatter:off
        http
            .csrf().disable()                 // CSRF protection is disabled for every endpoint
            .antMatcher("/**")                // this chain applies to all requests
            .authorizeRequests()
            // Paths open to everyone (login, static assets, error pages).
            // NOTE(review): the "/**" entry in this list appears to permit every
            // request, which would make the .anyRequest().authenticated() below
            // unreachable — confirm this is intentional.
            .antMatchers("/", "/**", "/login", "/login/**", "/assets/**", "/webjars/**", "/error/**", "/js/**", "/css/**")
            .permitAll().anyRequest()
            .authenticated()
            .and().logout().logoutSuccessHandler(
                ((request, response, authentication) -> {
                    if (authentication != null) {
                        // Strip the trailing "auth..." segment from the OAuth host and
                        // redirect to its logout page, bouncing back to this host.
                        response.sendRedirect(authConfig.getOauthHost().substring(0, authConfig.getOauthHost().lastIndexOf("auth")) + "logout?redirect_uri=http://" + request.getHeader("Host"));
                    } else {
                        // No authenticated session: just go back to the root page.
                        response.sendRedirect("/");
                    }
                })
            )
            //.permitAll().and().csrf().csrfTokenRepository(CookieCsrfTokenRepository.withHttpOnlyFalse())
            ;
        // @formatter:on
    }
}
package com.github.msteinbeck.sig4j.slot;

import com.github.msteinbeck.sig4j.Slot;

import java.util.Objects;

/**
 * A slot with 4 generic arguments.
 *
 * @param <T> The type of the first argument.
 * @param <U> The type of the second argument.
 * @param <V> The type of the third argument.
 * @param <W> The type of the forth argument.
 */
@FunctionalInterface
public interface Slot4<T, U, V, W> extends Slot {

    void accept(final T t, final U u, final V v, final W w);

    /**
     * Composes this slot with {@code after}: the returned slot first runs this
     * slot, then {@code after}, with the same four arguments.
     */
    @SuppressWarnings("unused")
    default Slot4<T, U, V, W> andThen(
            final Slot4<? super T, ? super U, ? super V, ? super W> after) {
        Objects.requireNonNull(after);
        return (first, second, third, fourth) -> {
            this.accept(first, second, third, fourth);
            after.accept(first, second, third, fourth);
        };
    }
}
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. // // Microsoft Cognitive Services (formerly Project Oxford): https://www.microsoft.com/cognitive-services // // Microsoft Cognitive Services (formerly Project Oxford) GitHub: // https://github.com/Microsoft/ProjectOxford-ClientSDK // // Copyright (c) Microsoft Corporation // All rights reserved. // // MIT License: // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//

package com.microsoft.projectoxford.visionsample;

import android.content.Intent;
import android.graphics.Bitmap;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.v7.app.ActionBarActivity;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;

import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.microsoft.projectoxford.vision.VisionServiceClient;
import com.microsoft.projectoxford.vision.VisionServiceRestClient;
import com.microsoft.projectoxford.vision.contract.AnalysisInDomainResult;
import com.microsoft.projectoxford.vision.contract.AnalysisResult;
import com.microsoft.projectoxford.vision.contract.Category;
import com.microsoft.projectoxford.vision.contract.Face;
import com.microsoft.projectoxford.vision.rest.VisionServiceException;
import com.microsoft.projectoxford.visionsample.helper.ImageHelper;

import org.json.JSONObject;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

/**
 * Activity that lets the user select an image and analyzes it against the
 * "celebrities" domain model via the Computer Vision REST client, then
 * renders the decoded JSON result into a text field.
 */
public class AnalyzeInDomainActivity extends ActionBarActivity {

    // Flag to indicate which task is to be performed.
    private static final int REQUEST_SELECT_IMAGE = 0;

    // The button to select an image
    private Button mButtonSelectImage;

    // The URI of the image selected to detect.
    private Uri mImageUri;

    // The image selected to detect.
    private Bitmap mBitmap;

    // The edit to show status and result.
    private EditText mEditText;

    // REST client used to call the Vision API.
    private VisionServiceClient client;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_analyze_domain);

        // Lazily create the Vision REST client with the configured key.
        // NOTE(review): instance fields start out null, so this check is always
        // true for a fresh Activity instance — presumably defensive; confirm.
        if (client == null) {
            client = new VisionServiceRestClient(getString(R.string.subscription_key));
        }

        mButtonSelectImage = (Button)findViewById(R.id.buttonSelectImage);
        mEditText = (EditText)findViewById(R.id.editTextResult);
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_analyze, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();

        //noinspection SimplifiableIfStatement
        if (id == R.id.action_settings) {
            return true;
        }

        return super.onOptionsItemSelected(item);
    }

    /**
     * Disables the select button, shows a progress message, and kicks off the
     * background analysis task. The button is re-enabled in onPostExecute.
     */
    public void doAnalyze() {
        mButtonSelectImage.setEnabled(false);
        mEditText.setText("Analyzing...");

        try {
            new doRequest().execute();
        } catch (Exception e) {
            mEditText.setText("Error encountered. Exception is: " + e.toString());
        }
    }

    // Called when the "Select Image" button is clicked.
    public void selectImage(View view) {
        mEditText.setText("");
        Intent intent;
        intent = new Intent(AnalyzeInDomainActivity.this, com.microsoft.projectoxford.visionsample.helper.SelectImageActivity.class);
        startActivityForResult(intent, REQUEST_SELECT_IMAGE);
    }

    // Called when image selection is done.
    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        Log.d("AnalyzeInDomainActivity", "onActivityResult");
        switch (requestCode) {
            case REQUEST_SELECT_IMAGE:
                if(resultCode == RESULT_OK) {
                    // If image is selected successfully, set the image URI and bitmap.
                    mImageUri = data.getData();
                    // Downscale the bitmap before uploading (see ImageHelper).
                    mBitmap = ImageHelper.loadSizeLimitedBitmapFromUri(
                            mImageUri, getContentResolver());
                    if (mBitmap != null) {
                        // Show the image on screen.
                        ImageView imageView = (ImageView) findViewById(R.id.selectedImage);
                        imageView.setImageBitmap(mBitmap);

                        // Add detection log.
                        Log.d("AnalyzeInDomainActivity", "Image: " + mImageUri + " resized to " + mBitmap.getWidth()
                                + "x" + mBitmap.getHeight());

                        // Start analysis immediately after a successful selection.
                        doAnalyze();
                    }
                }
                break;
            default:
                break;
        }
    }

    /**
     * Compresses the selected bitmap to JPEG, sends it to the Vision API's
     * domain-model endpoint, and returns the raw JSON result.
     *
     * @return the analysis result serialized to JSON
     * @throws VisionServiceException on API errors
     * @throws IOException on stream errors
     */
    private String process() throws VisionServiceException, IOException {
        Gson gson = new Gson();
        String model = "celebrities";

        // Put the image into an input stream for detection.
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        mBitmap.compress(Bitmap.CompressFormat.JPEG, 100, output);
        ByteArrayInputStream inputStream = new ByteArrayInputStream(output.toByteArray());
        AnalysisInDomainResult v = this.client.analyzeImageInDomain(inputStream, model);

        String result = gson.toJson(v);
        Log.d("result", result);

        return result;
    }

    // Background task running process() off the UI thread.
    // NOTE(review): class name violates Java UpperCamelCase convention.
    private class doRequest extends AsyncTask<String, String, String> {
        // Store error message
        private Exception e = null;

        public doRequest() {
        }

        @Override
        protected String doInBackground(String... args) {
            try {
                return process();
            } catch (Exception e) {
                this.e = e;    // Store error
            }
            return null;
        }

        @Override
        protected void onPostExecute(String data) {
            super.onPostExecute(data);
            // Display based on error existence
            mEditText.setText("");
            if (e != null) {
                mEditText.setText("Error: " + e.getMessage());
                this.e = null;
            } else {
                // Re-parse the JSON produced in process() into the typed result.
                Gson gson = new Gson();
                AnalysisInDomainResult result = gson.fromJson(data, AnalysisInDomainResult.class);

                mEditText.append("Image format: " + result.metadata.format + "\n");
                mEditText.append("Image width: " + result.metadata.width + ", height:" + result.metadata.height + "\n");
                mEditText.append("\n");

                // Decode the returned result
                // NOTE: this is different for each domain model
                JsonArray detectedCelebs = result.result.get("celebrities").getAsJsonArray();
                mEditText.append("Celebrities detected: "+detectedCelebs.size()+"\n");
                for (JsonElement celebElement: detectedCelebs) {
                    JsonObject celeb = celebElement.getAsJsonObject();
                    mEditText.append("Name: " + celeb.get("name").getAsString() + ", score: " +
                            celeb.get("confidence").getAsString() + "\n");
                }

                mEditText.append("\n--- Raw Data ---\n\n");
                mEditText.append(data);
                mEditText.setSelection(0);
            }
            // Allow the user to pick another image.
            mButtonSelectImage.setEnabled(true);
        }
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.shardingsphere.encrypt.distsql.handler.converter;

import org.apache.shardingsphere.distsql.parser.segment.AlgorithmSegment;
import org.apache.shardingsphere.encrypt.api.config.EncryptRuleConfiguration;
import org.apache.shardingsphere.encrypt.distsql.parser.segment.EncryptColumnSegment;
import org.apache.shardingsphere.encrypt.distsql.parser.segment.EncryptRuleSegment;
import org.junit.Test;

import java.util.Collection;
import java.util.Collections;
import java.util.Properties;

import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;

/**
 * Verifies that an encrypt rule DistSQL segment is converted into the
 * expected {@link EncryptRuleConfiguration}.
 */
public final class EncryptRuleStatementConverterTest {
    
    @Test
    public void assertCovert() {
        // Build a single-table rule segment and convert it.
        EncryptRuleSegment ruleSegment = new EncryptRuleSegment("t_encrypt", createColumns(), null);
        EncryptRuleConfiguration actual = EncryptRuleStatementConverter.convert(Collections.singleton(ruleSegment));
        // Every field of the single converted table/column must round-trip.
        assertThat(actual.getTables().iterator().next().getName(), is("t_encrypt"));
        assertThat(actual.getTables().iterator().next().getColumns().iterator().next().getLogicColumn(), is("user_id"));
        assertThat(actual.getTables().iterator().next().getColumns().iterator().next().getCipherColumn(), is("user_cipher"));
        assertThat(actual.getTables().iterator().next().getColumns().iterator().next().getPlainColumn(), is("user_plain"));
        assertThat(actual.getTables().iterator().next().getColumns().iterator().next().getAssistedQueryColumn(), is("assisted_column"));
        assertThat(actual.getTables().iterator().next().getColumns().iterator().next().getEncryptorName(), is("t_encrypt_user_id"));
    }
    
    // Builds one fully-populated column segment with MD5 encryptors.
    private Collection<EncryptColumnSegment> createColumns() {
        AlgorithmSegment encryptor = new AlgorithmSegment("MD5", createProperties());
        AlgorithmSegment assistedEncryptor = new AlgorithmSegment("MD5", createProperties());
        EncryptColumnSegment column = new EncryptColumnSegment(
                "user_id", "user_cipher", "user_plain", "assisted_column", encryptor, assistedEncryptor);
        return Collections.singleton(column);
    }
    
    // Properties attached to each algorithm segment.
    private Properties createProperties() {
        Properties props = new Properties();
        props.setProperty("MD5-key", "MD5-value");
        return props;
    }
}
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInspection.dataFlow.inliner; import com.intellij.codeInspection.dataFlow.CFGBuilder; import com.intellij.codeInspection.dataFlow.Nullness; import com.intellij.psi.*; import com.intellij.psi.util.PsiUtil; import com.intellij.util.ObjectUtils; import com.siyeh.ig.callMatcher.CallMatcher; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static com.intellij.psi.CommonClassNames.JAVA_UTIL_COLLECTIONS; import static com.intellij.psi.CommonClassNames.JAVA_UTIL_COMPARATOR; import static com.siyeh.ig.callMatcher.CallMatcher.*; /** * Simplified model for comparator: does not perform actual comparison, just executes key extractors, etc. 
 */
abstract class ComparatorModel {
  // Comparator.comparing / comparingInt / comparingLong / comparingDouble (key-extractor forms).
  private static final CallMatcher KEY_EXTRACTOR =
    anyOf(staticCall(JAVA_UTIL_COMPARATOR, "comparing", "comparingInt", "comparingLong", "comparingDouble").parameterCount(1),
          staticCall(JAVA_UTIL_COMPARATOR, "comparing").parameterCount(2));
  // Zero-arg comparator factories whose comparators dereference their arguments.
  private static final CallMatcher NULL_HOSTILE =
    anyOf(staticCall(JAVA_UTIL_COMPARATOR, "naturalOrder", "reverseOrder").parameterCount(0),
          staticCall(JAVA_UTIL_COLLECTIONS, "reverseOrder").parameterCount(0));
  // Comparator.nullsFirst / nullsLast wrappers.
  private static final CallMatcher NULL_FRIENDLY = staticCall(JAVA_UTIL_COMPARATOR, "nullsFirst", "nullsLast").parameterCount(1);
  // comparator.reversed() — ordering is irrelevant to this model, so it is unwrapped.
  private static final CallMatcher REVERSED = instanceCall(JAVA_UTIL_COMPARATOR, "reversed").parameterCount(0);
  // Collections.reverseOrder(cmp) — likewise unwrapped to the inner comparator.
  private static final CallMatcher REVERSE_ORDER = staticCall(JAVA_UTIL_COLLECTIONS, "reverseOrder").parameterCount(1);

  // Whether invoking this comparator on a null argument fails (e.g. NPE).
  private final boolean myFailsOnNull;

  protected ComparatorModel(boolean failsOnNull) {
    myFailsOnNull = failsOnNull;
  }

  // Evaluates side effects of constructing the comparator (e.g. key extractors).
  abstract void evaluate(CFGBuilder builder);

  // Models applying the comparator to the value currently on the builder's stack.
  abstract void invoke(CFGBuilder builder);

  boolean failsOnNull() {
    return myFailsOnNull;
  }

  // Comparator known to dereference its argument: nothing to evaluate; invoking
  // it just consumes the stack value.
  private static class NullHostile extends ComparatorModel {
    NullHostile() {
      super(true);
    }

    @Override
    void evaluate(CFGBuilder builder) {}

    @Override
    void invoke(CFGBuilder builder) {
      builder.pop();
    }
  }

  // Arbitrary comparator expression: modeled as an opaque 2-arg function call
  // whose result is discarded.
  private static class Unknown extends ComparatorModel {
    private final PsiExpression myExpression;

    Unknown(PsiExpression expression) {
      super(false);
      myExpression = expression;
    }

    @Override
    void evaluate(CFGBuilder builder) {
      builder.evaluateFunction(myExpression);
    }

    @Override
    void invoke(CFGBuilder builder) {
      builder.pushUnknown().invokeFunction(2, myExpression).pop();
    }
  }

  // nullsFirst/nullsLast wrapper: the downstream comparator only runs when the
  // value on the stack is non-null; null is simply consumed.
  private static class NullFriendly extends ComparatorModel {
    private final ComparatorModel myDownstream;

    NullFriendly(ComparatorModel downstream) {
      super(false);
      myDownstream = downstream;
    }

    @Override
    void evaluate(CFGBuilder builder) {
      myDownstream.evaluate(builder);
    }

    @Override
    void invoke(CFGBuilder builder) {
      builder.dup().ifNotNull().chain(myDownstream::invoke).elseBranch().pop().endIf();
    }
  }

  // Comparator.comparing(keyExtractor[, downstream]): applies the key extractor
  // to the stack value, then invokes the downstream comparator on the key.
  private static class KeyExtractor extends ComparatorModel {
    private final PsiExpression myKeyExtractor;
    private final ComparatorModel myDownstream;

    private KeyExtractor(PsiExpression keyExtractor, ComparatorModel downstream) {
      super(false);
      myKeyExtractor = keyExtractor;
      myDownstream = downstream;
    }

    @Override
    void evaluate(CFGBuilder builder) {
      builder.evaluateFunction(myKeyExtractor);
      myDownstream.evaluate(builder);
    }

    @Override
    void invoke(CFGBuilder builder) {
      // If the downstream fails on null, the extracted key is known non-null
      // on the success path; otherwise its nullness stays unknown.
      builder.invokeFunction(1, myKeyExtractor, myDownstream.myFailsOnNull ? Nullness.NOT_NULL : Nullness.UNKNOWN)
             .chain(myDownstream::invoke);
    }
  }

  /**
   * Builds a {@link ComparatorModel} from a comparator expression, recursively
   * unwrapping reversed()/reverseOrder() (ordering does not matter here) and
   * recognizing nullsFirst/nullsLast and key-extractor factories. Unrecognized
   * expressions become {@link Unknown}; a missing expression is treated as a
   * null-hostile (natural-order) comparator.
   */
  @NotNull
  static ComparatorModel from(@Nullable PsiExpression expression) {
    expression = PsiUtil.skipParenthesizedExprDown(expression);
    if (expression == null || NULL_HOSTILE.matches(expression)) {
      return new NullHostile();
    }
    if (expression instanceof PsiReferenceExpression) {
      PsiReferenceExpression ref = (PsiReferenceExpression)expression;
      // String.CASE_INSENSITIVE_ORDER is also null-hostile.
      if ("CASE_INSENSITIVE_ORDER".equals(ref.getReferenceName())) {
        PsiField field = ObjectUtils.tryCast(ref.resolve(), PsiField.class);
        if (field != null && field.getContainingClass() != null &&
            CommonClassNames.JAVA_LANG_STRING.equals(field.getContainingClass().getQualifiedName())) {
          return new NullHostile();
        }
      }
    }
    PsiMethodCallExpression call = ObjectUtils.tryCast(expression, PsiMethodCallExpression.class);
    if (call == null) return new Unknown(expression);
    PsiExpression qualifier = call.getMethodExpression().getQualifierExpression();
    if (REVERSED.test(call) && qualifier != null) {
      return from(qualifier);
    }
    if (REVERSE_ORDER.test(call)) {
      return from(call.getArgumentList().getExpressions()[0]);
    }
    if (NULL_FRIENDLY.test(call) && qualifier != null) {
      return new NullFriendly(from(qualifier));
    }
    if (KEY_EXTRACTOR.test(call)) {
      PsiExpression[] args = call.getArgumentList().getExpressions();
      PsiExpression keyExtractor = args[0];
      // One-arg comparing* implies natural ordering of the key (null-hostile).
      ComparatorModel downstream = args.length == 2 ? from(args[1]) : new NullHostile();
      return new KeyExtractor(keyExtractor, downstream);
    }
    return new Unknown(expression);
  }
}
/** * Copyright (C) 2004-2011 Jive Software. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.game.reversi; import javax.swing.*; import java.util.*; import java.util.List; import java.awt.*; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import org.jivesoftware.smack.SmackException; import org.jivesoftware.smack.XMPPConnection; import org.jivesoftware.smack.StanzaListener; import org.jivesoftware.smack.filter.PacketExtensionFilter; import org.jivesoftware.smack.packet.DefaultExtensionElement; import org.jivesoftware.smack.packet.Stanza; import org.jivesoftware.smack.packet.Message; import org.jivesoftware.spark.util.log.Log; import org.jxmpp.jid.Jid; /** * The game UI, which is created after both players have accepted a new game. 
 *
 * @author Bill Lynch
 */
public class ReversiPanel extends JPanel {

    private static final long serialVersionUID = 3591458286918924065L;

    // Board and info-panel geometry, in pixels.
    private static final int BOARD_SIZE = 320;
    private static final int INFO_PANEL_HEIGHT = 50;
    private static final int BORDER_SIZE = 5;
    public static final int TOTAL_WIDTH = BOARD_SIZE + (BORDER_SIZE*2);
    public static final int TOTAL_HEIGHT = TOTAL_WIDTH + INFO_PANEL_HEIGHT; // frame width + 50 for the info panel
    private static final int NUM_BLOCKS = 8; // 8x8 grid
    private static final int BLOCK_SIZE = BOARD_SIZE/NUM_BLOCKS;
    private static final int DISC_SIZE = (int)(BLOCK_SIZE*0.8); // 80% of block size

    // XMPP connection used to exchange move stanzas with the opponent.
    private XMPPConnection connection;
    // Color (ReversiModel.BLACK or WHITE) played by the opponent.
    private int otherPlayer;
    private int gameID;
    private Jid opponentJID;
    private StanzaListener gameMoveListener;
    // The 64 block panels, in board order (index 0..63).
    private List<ReversiBlock> blocks = new ArrayList<ReversiBlock>();

    // Main game object
    private ReversiModel reversi;

    // All images used by the game.
    ImageIcon imageIcon = ReversiRes.getImageIcon(ReversiRes.REVERSI_ICON);
    private Image imageBackground = ReversiRes.getImageIcon(ReversiRes.REVERSI_BOARD).getImage();
    private Image imageScoreWhite = ReversiRes.getImageIcon(ReversiRes.REVERSI_SCORE_WHITE).getImage();
    private Image imageScoreBlack = ReversiRes.getImageIcon(ReversiRes.REVERSI_SCORE_BLACK).getImage();
    private Image imageTurnBlack = ReversiRes.getImageIcon(ReversiRes.REVERSI_LABEL_BLACK).getImage();
    private Image imageTurnWhite = ReversiRes.getImageIcon(ReversiRes.REVERSI_LABEL_WHITE).getImage();
    private Image imageButtonResign = ReversiRes.getImageIcon(ReversiRes.REVERSI_RESIGN).getImage();
    private Image imageYou = ReversiRes.getImageIcon(ReversiRes.REVERSI_YOU).getImage();
    private Image imageThem = ReversiRes.getImageIcon(ReversiRes.REVERSI_THEM).getImage();

    /**
     * Creates a new Reversi panel.
     *
     * @param connection Connection associated.
     * @param gameID Game ID number
     * @param startingPlayer Whether we are the starting player or not
     * @param opponentJID JID of opponent
     */
    public ReversiPanel(XMPPConnection connection, final int gameID, boolean startingPlayer, Jid opponentJID) {
        this.connection = connection;
        this.gameID = gameID;
        this.opponentJID = opponentJID;
        // If we start we play black, so the opponent plays the other color.
        otherPlayer = startingPlayer? ReversiModel.WHITE : ReversiModel.BLACK;

        // Load all images.

        // Start the game
        reversi = new ReversiModel();

        if (connection != null) {
            // Listen for incoming GameMove extensions from the opponent.
            gameMoveListener = new StanzaListener() {
                @Override
                public void processStanza(Stanza stanza) {
                    GameMove move = (GameMove)stanza.getExtension(GameMove.ELEMENT_NAME, GameMove.NAMESPACE);
                    // If this is a move for the current game.
                    if (move.getGameID() == gameID) {
                        int position = move.getPosition();
                        // Make sure that the other player is allowed to make the move right now.
                        if (reversi.getCurrentPlayer() == otherPlayer && reversi.isValidMove(position)) {
                            reversi.makeMove(position);
                            // Redraw board.
                            ReversiPanel.this.repaint();
                        }
                        else {
                            // TODO: other user automatically forfeits!
                        }
                        // Execute move.
                    }
                }
            };
            connection.addAsyncStanzaListener(gameMoveListener,
                    new PacketExtensionFilter(GameMove.ELEMENT_NAME, GameMove.NAMESPACE));
            // TODO: at end of game, remove listener.
        }

        setOpaque(false);
        // Use absolute layout
        setLayout(null);
        // Set its size:
        setPreferredSize(new Dimension(TOTAL_WIDTH, TOTAL_HEIGHT));

        // Make a new panel which is the game board grid:
        JPanel reversiBoard = new JPanel(new GridLayout(NUM_BLOCKS,NUM_BLOCKS,0,0));
        reversiBoard.setOpaque(false);
        for (int i=0; i<NUM_BLOCKS*NUM_BLOCKS; i++) {
            ReversiBlock block = new ReversiBlock(this, i);
            blocks.add(block);
            reversiBoard.add(block);
        }
        // Add the reversi board to the main panel:
        add(reversiBoard);
        // Position it:
        reversiBoard.setBounds(BORDER_SIZE, BORDER_SIZE, BOARD_SIZE, BOARD_SIZE);

        // TODO: listen for click on resign button!!
    }

    /**
     * Sends a forfeit message to the other player.
     */
    public void sendForfeit() throws SmackException.NotConnectedException {
        DefaultExtensionElement forfeit = new DefaultExtensionElement(GameForfeit.ELEMENT_NAME, GameForfeit.NAMESPACE);
        forfeit.setValue("gameID", Integer.toString(gameID));
        Message message = new Message();
        message.setTo(opponentJID);
        message.addExtension(forfeit);
        try {
            connection.sendStanza(message);
        } catch (InterruptedException e) {
            // NOTE(review): wraps the interrupt instead of restoring the flag
            // via Thread.currentThread().interrupt() — confirm this is intended.
            throw new IllegalStateException(e);
        }
        // Stop listening for moves once we have forfeited.
        connection.removeAsyncStanzaListener(gameMoveListener);
    }

    // Paints the board background plus the info panel (scores, turn indicator,
    // game result, resign button). The grid squares paint themselves.
    public void paintComponent(Graphics g) {
        // Turn on anti-aliasing.
        Graphics2D g2d = (Graphics2D)g;
        g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

        // Background
        g.drawImage(imageBackground, 0, 0, null);

        // Draw info panel components.

        // Draw the score.
        g.drawImage(imageScoreWhite, 3, BOARD_SIZE + BORDER_SIZE*2 + 7, null);
        g.drawImage(imageScoreBlack, 3, BOARD_SIZE + BORDER_SIZE*2 + 27, null);
        g.setFont(new Font("SansSerif", Font.BOLD, 12));
        String whiteScore = String.valueOf(reversi.getWhiteScore());
        String blackScore = String.valueOf(reversi.getBlackScore());
        FontMetrics fm = g.getFontMetrics();
        // Right-align both score strings against a common edge.
        int width = Math.max(fm.stringWidth(whiteScore), fm.stringWidth(blackScore));
        // NOTE(review): the white score offset uses imageScoreBlack's width and
        // the black score uses imageScoreWhite's — harmless if both label images
        // are the same width, but confirm the pairing is intentional.
        g.drawString(whiteScore, imageScoreBlack.getWidth(null) + 7 + width - fm.stringWidth(whiteScore), BOARD_SIZE + BORDER_SIZE*2 + 22);
        g.drawString(blackScore, imageScoreWhite.getWidth(null) + 7 + width - fm.stringWidth(blackScore), BOARD_SIZE + BORDER_SIZE*2 + 42);

        // Draw who's turn it is.
        if (!reversi.isGameFinished()) {
            if (reversi.getCurrentPlayer() == ReversiModel.BLACK) {
                g.drawImage(imageTurnBlack, 116, BOARD_SIZE + BORDER_SIZE*2 + 11, null);
            }
            else {
                g.drawImage(imageTurnWhite, 116, BOARD_SIZE + BORDER_SIZE*2 + 11, null);
            }
        }
        else {
            // Game over: derive the result text from our color and the scores.
            int me = otherPlayer==ReversiModel.BLACK?ReversiModel.WHITE:ReversiModel.BLACK;
            String whoWins = "Draw";
            if (reversi.getBlackScore() > reversi.getWhiteScore()) {
                if (me == ReversiModel.BLACK) whoWins = "YOU WIN!";
                else whoWins = "YOU LOST!";
            }
            else if(reversi.getBlackScore() < reversi.getWhiteScore()) {
                if (me == ReversiModel.WHITE) whoWins = "YOU WIN!";
                else whoWins = "YOU LOST!";
            }
            g.drawString(whoWins, 130, BOARD_SIZE + BORDER_SIZE*2 + 20);
        }

        // Indicate whether it is our turn or the opponent's.
        if (reversi.getCurrentPlayer() == otherPlayer) {
            g.drawImage(imageThem, 163, BOARD_SIZE + BORDER_SIZE*2 + 31, null);
        }
        else {
            g.drawImage(imageYou, 163, BOARD_SIZE + BORDER_SIZE*2 + 31, null);
        }

        // The resign button.
        g.drawImage(imageButtonResign, 281, BOARD_SIZE + BORDER_SIZE*2 + 17, null);
    }

    /**
     * A Reversi block (one of the squares of the grid).
     */
    public class ReversiBlock extends JPanel {
        private static final long serialVersionUID = -8504469339731900770L;
        private ReversiPanel ui;
        private int index;

        public ReversiBlock(ReversiPanel ui, int index) {
            super();
            this.ui = ui;
            this.index = index;
            setPreferredSize(new Dimension(BLOCK_SIZE,BLOCK_SIZE));
            addMouseListener(new ReversiBlockMouseListener(this));
            setOpaque(false);
        }

        /**
         * Returns a handle on the game UI.
         *
         * @return ReversiPanel of block.
         */
        public ReversiPanel getReversiUI() {
            return ui;
        }

        /**
         * This block's index (0->63).
         *
         * @return Index of block
         */
        public int getIndex() {
            return index;
        }

        protected void paintComponent(Graphics g) {
            super.paintComponent(g);
            // Turn on anti-aliasing:
            ((Graphics2D)g).setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

            // Draw a disc in the block if the game says we should.
            int boardValue = reversi.getBoardValue(index);
            if (boardValue == org.jivesoftware.game.reversi.ReversiModel.BLACK) {
                drawDisc(g, Color.BLACK);
            }
            else if (reversi.getBoardValue(index) == org.jivesoftware.game.reversi.ReversiModel.WHITE) {
                drawDisc(g, Color.WHITE);
            }
        }

        /**
         * Draws the disc.
         *
         * @param g Graphics to draw
         * @param color Color
         */
        private void drawDisc(Graphics g, Color color) {
            // Offset that centers the disc inside the block.
            int position = BLOCK_SIZE - ((BLOCK_SIZE+DISC_SIZE)/2);
            g.setColor(color);
            g.fillOval(position, position, DISC_SIZE, DISC_SIZE);
        }
    }

    /**
     * A mouse listener for a Reversi block.
     */
    public class ReversiBlockMouseListener extends MouseAdapter {
        private ReversiBlock block;

        public ReversiBlockMouseListener(ReversiBlock block) {
            this.block = block;
        }

        /**
         * Highlight the block if this block is a valid move.
         */
        public void mouseEntered(MouseEvent e) {
            super.mouseEntered(e);
            // Only highlight when it is our turn and the square is playable.
            if (reversi.getCurrentPlayer() != otherPlayer && reversi.isValidMove(block.getIndex())) {
                block.setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR));
                block.setBorder(BorderFactory.createLineBorder(Color.WHITE));
            }
        }

        /**
         * Set the block color back to the default.
         */
        public void mouseExited(MouseEvent e) {
            super.mouseExited(e);
            block.setCursor(Cursor.getDefaultCursor());
            block.setBorder(null);
        }

        /**
         * If the click box is a valid move, register a move in this box.
         */
        public void mouseClicked(MouseEvent e) {
            super.mouseClicked(e);
            // Make sure that it's our turn and that it's a valid move.
            if (reversi.getCurrentPlayer() != otherPlayer && reversi.isValidMove(block.getIndex())) {
                // Update the game model.
                reversi.makeMove(block.getIndex());

                // Send the move to the other player.
                Message message = new Message(opponentJID);
                GameMove move = new GameMove();
                move.setGameID(gameID);
                move.setPosition(block.getIndex());
                message.addExtension(move);
                try {
                    connection.sendStanza(message);
                } catch ( SmackException.NotConnectedException | InterruptedException e1 ) {
                    Log.warning( "Unable to send move to " + message.getTo(), e1 );
                }

                // Repaint board.
                ReversiPanel.this.repaint();

                // Repaint all blocks.
//                for (Iterator it = block.getReversiUI().getBlocks().iterator(); it.hasNext();) {
//                    ReversiBlock component = (ReversiBlock)it.next();
//                    component.repaint();
//                }
            }
        }
    }

    /**
     * Returns the 64 block panels that make up the board grid.
     *
     * @return the board's block panels, in index order.
     */
    public List<ReversiBlock> getBlocks() {
        return blocks;
    }
}
package com.kq.concurrent.lock; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; /** * @author kq * @date 2021-06-26 9:51 * @since 2020-0630 */ public class ReentrantLockDemo { public static void main(String[] args) { ReentrantLock lock = new ReentrantLock(); Runnable runnable = ()->{ lock.lock(); try { TimeUnit.SECONDS.sleep(1000); }catch (Exception e){ e.printStackTrace(); } }; Thread t = new Thread(runnable); t.start(); try { TimeUnit.SECONDS.sleep(3); }catch (Exception e){ e.printStackTrace(); } try{ lock.lock(); System.out.println("================================================="); } finally { lock.unlock(); } } }
/*
 * Copyright 2012 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.bitcoin.tools;

import com.google.bitcoin.core.*;
import com.google.bitcoin.crypto.KeyCrypterException;
import com.google.bitcoin.discovery.DnsDiscovery;
import com.google.bitcoin.discovery.PeerDiscovery;
import com.google.bitcoin.params.MainNetParams;
import com.google.bitcoin.params.RegTestParams;
import com.google.bitcoin.params.TestNet3Params;
import com.google.bitcoin.store.*;
import com.google.bitcoin.utils.BriefLogFormatter;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.DateConverter;
import org.bitcoinj.wallet.Protos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.spongycastle.util.encoders.Hex;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.ParseException;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.LogManager;

/**
 * A command line tool for manipulating wallets and working with Bitcoin.<p>
 *
 * All state lives in static fields: main() parses the flags into them, dispatches to
 * one action method, optionally waits for a --waitfor condition, then shuts down.
 * The class is therefore single-use and not thread-safe.
 */
public class WalletTool {
    private static final Logger log = LoggerFactory.getLogger(WalletTool.class);

    // Usage text printed when --help is given or the arguments don't parse.
    private static final String HELP_TEXT =
            "WalletTool: print and manipulate wallets\n\n" +
            "Usage:\n" +
            ">>> GENERAL OPTIONS\n" +
            " --debuglog Enables logging from the core library.\n" +
            " --net=XXX Which network to connect to, defaults to PROD, can also be TEST or REGTEST.\n" +
            " --mode=FULL/SPV Whether to do full verification of the chain or just light mode.\n" +
            " --wallet=<file> Specifies what wallet file to load and save.\n" +
            " --chain=<file> Specifies the name of the file that stores the block chain.\n" +
            " --force Overrides any safety checks on the requested action.\n" +
            " --date Provide a date in form YYYY/MM/DD to any action that requires one.\n" +
            " --peers=1.2.3.4 Comma separaterd IP addresses/domain names for connections instead of peer discovery.\n" +
            " --condition=... Allows you to specify a numeric condition for other commands. The format is\n" +
            " one of the following operators = < > <= >= immediately followed by a number.\n" +
            " For example --condition=\">5.10\" or --condition=\"<=1\"\n" +
            " --password=... For an encrypted wallet, the password is provided here.\n" +
            "\n>>> ACTIONS\n" +
            " --action=DUMP Loads and prints the given wallet in textual form to stdout.\n" +
            " --action=RAW_DUMP Prints the wallet as a raw protobuf with no parsing or sanity checking applied.\n" +
            " --action=CREATE Makes a new wallet in the file specified by --wallet.\n" +
            " Will complain and require --force if the wallet already exists.\n" +
            " --action=ADD_KEY Adds a new key to the wallet, either specified or freshly generated.\n" +
            " If --date is specified, that's the creation date.\n" +
            " If --unixtime is specified, that's the creation time and it overrides --date.\n" +
            " If --privkey is specified, use as a hex/base58 encoded private key.\n" +
            " Don't specify --pubkey in that case, it will be derived automatically.\n" +
            " If --pubkey is specified, use as a hex/base58 encoded non-compressed public key.\n" +
            " --action=DELETE_KEY Removes the key specified by --pubkey or --addr from the wallet.\n" +
            " --action=SYNC Sync the wallet with the latest block chain (download new transactions).\n" +
            " If the chain file does not exist this will RESET the wallet.\n" +
            " --action=RESET Deletes all transactions from the wallet, for if you want to replay the chain.\n" +
            " --action=SEND Creates a transaction with the given --output from this wallet and broadcasts, eg:\n" +
            " --output=1GthXFQMktFLWdh5EPNGqbq3H6WdG8zsWj:1.245\n" +
            " You can repeat --output=address:value multiple times.\n" +
            " If the output destination starts with 04 and is 65 or 33 bytes long it will be\n" +
            " treated as a public key instead of an address and the send will use \n" +
            " <key> CHECKSIG as the script.\n" +
            " Other options include:\n" +
            " --fee=0.01 sets the tx fee\n" +
            " --locktime=1234 sets the lock time to block 1234\n" +
            " --locktime=2013/01/01 sets the lock time to 1st Jan 2013\n" +
            " --allow-unconfirmed will let you create spends of pending non-change outputs.\n" +
            "\n>>> WAITING\n" +
            "You can wait for the condition specified by the --waitfor flag to become true. Transactions and new\n" +
            "blocks will be processed during this time. When the waited for condition is met, the tx/block hash\n" +
            "will be printed. Waiting occurs after the --action is performed, if any is specified.\n\n" +
            " --waitfor=EVER Never quit.\n" +
            " --waitfor=WALLET_TX Any transaction that sends coins to or from the wallet.\n" +
            " --waitfor=BLOCK A new block that builds on the best chain.\n" +
            " --waitfor=BALANCE Waits until the wallets balance meets the --condition.\n";

    // Flag specs, populated by main() before parsing.
    private static OptionSpec<String> walletFileName;
    private static OptionSpec<ActionEnum> actionFlag;
    private static OptionSpec<NetworkEnum> netFlag;
    private static OptionSpec<Date> dateFlag;
    private static OptionSpec<Integer> unixtimeFlag;
    private static OptionSpec<WaitForEnum> waitForFlag;
    private static OptionSpec<ValidationMode> modeFlag;
    private static OptionSpec<String> conditionFlag;

    // Parsed configuration and live objects, shared by all action methods.
    private static NetworkParameters params;
    private static File walletFile;
    private static OptionSet options;
    private static java.util.logging.Logger logger;
    private static BlockStore store;
    private static AbstractBlockChain chain;
    private static PeerGroup peers;
    private static Wallet wallet;
    private static File chainFileName;
    // NOTE(review): 'discovery' is never assigned or read in this file — appears unused.
    private static PeerDiscovery discovery;
    private static ValidationMode mode;
    private static String password;

    /**
     * A parsed numeric condition of the form {@code <op><number>}, e.g. ">=1.5",
     * used by --condition together with --waitfor=BALANCE.
     */
    public static class Condition {
        public enum Type {
            // Less than, greater than, less than or equal, greater than or equal.
            EQUAL, LT, GT, LTE, GTE
        }
        Type type;
        String value;

        /**
         * Parses the operator prefix and keeps the remainder as the (unparsed) value.
         * Two-character operators must be checked before their one-character prefixes.
         *
         * @throws RuntimeException if the string is too short or the operator is unknown
         */
        public Condition(String from) {
            if (from.length() < 2) throw new RuntimeException("Condition string too short: " + from);

            if (from.startsWith("<=")) type = Type.LTE;
            else if (from.startsWith(">=")) type = Type.GTE;
            else if (from.startsWith("<")) type = Type.LT;
            else if (from.startsWith("=")) type = Type.EQUAL;
            else if (from.startsWith(">")) type = Type.GT;
            else throw new RuntimeException("Unknown operator in condition: " + from);

            String s;
            switch (type) {
                case LT: case GT: case EQUAL:
                    s = from.substring(1);
                    break;
                case LTE: case GTE:
                    s = from.substring(2);
                    break;
                default:
                    throw new RuntimeException("Unreachable");
            }
            value = s;
        }

        /**
         * Interprets {@link #value} as a BTC amount (converted to nanocoins) and
         * compares it against the given amount. Exits the process if the value
         * does not parse as a number.
         */
        public boolean matchBitcoins(BigInteger comparison) {
            try {
                BigInteger units = Utils.toNanoCoins(value);
                switch (type) {
                    case LT: return comparison.compareTo(units) < 0;
                    case GT: return comparison.compareTo(units) > 0;
                    case EQUAL: return comparison.compareTo(units) == 0;
                    case LTE: return comparison.compareTo(units) <= 0;
                    case GTE: return comparison.compareTo(units) >= 0;
                    default:
                        throw new RuntimeException("Unreachable");
                }
            } catch (NumberFormatException e) {
                System.err.println("Could not parse value from condition string: " + value);
                System.exit(1);
                return false;
            }
        }
    }

    private static Condition condition;

    public enum ActionEnum {
        NONE,
        DUMP,
        RAW_DUMP,
        CREATE,
        ADD_KEY,
        DELETE_KEY,
        SYNC,
        RESET,
        SEND
    };

    public enum WaitForEnum {
        EVER,
        WALLET_TX,
        BLOCK,
        BALANCE
    };

    public enum NetworkEnum {
        PROD,
        TEST,
        REGTEST
    }

    public enum ValidationMode {
        FULL,
        SPV
    }

    /**
     * Entry point: defines and parses flags, loads (or creates) the wallet,
     * dispatches on --action, optionally waits for --waitfor, saves and shuts down.
     */
    public static void main(String[] args) throws Exception {
        OptionParser parser = new OptionParser();
        parser.accepts("help");
        parser.accepts("force");
        parser.accepts("debuglog");
        walletFileName = parser.accepts("wallet")
                .withRequiredArg()
                .defaultsTo("wallet");
        actionFlag = parser.accepts("action")
                .withRequiredArg()
                .ofType(ActionEnum.class);
        netFlag = parser.accepts("net")
                .withOptionalArg()
                .ofType(NetworkEnum.class)
                .defaultsTo(NetworkEnum.PROD);
        dateFlag = parser.accepts("date")
                .withRequiredArg()
                .ofType(Date.class)
                .withValuesConvertedBy(DateConverter.datePattern("yyyy/MM/dd"));
        waitForFlag = parser.accepts("waitfor")
                .withRequiredArg()
                .ofType(WaitForEnum.class);
        modeFlag = parser.accepts("mode")
                .withRequiredArg()
                .ofType(ValidationMode.class)
                .defaultsTo(ValidationMode.SPV);
        OptionSpec<String> chainFlag = parser.accepts("chain").withRequiredArg();
        // For addkey/delkey.
        parser.accepts("pubkey").withRequiredArg();
        parser.accepts("privkey").withRequiredArg();
        parser.accepts("addr").withRequiredArg();
        parser.accepts("peers").withRequiredArg();
        OptionSpec<String> outputFlag = parser.accepts("output").withRequiredArg();
        parser.accepts("value").withRequiredArg();
        parser.accepts("fee").withRequiredArg();
        unixtimeFlag = parser.accepts("unixtime").withRequiredArg().ofType(Integer.class);
        conditionFlag = parser.accepts("condition").withRequiredArg();
        parser.accepts("locktime").withRequiredArg();
        parser.accepts("allow-unconfirmed");
        OptionSpec<String> passwordFlag = parser.accepts("password").withRequiredArg();
        options = parser.parse(args);

        // No args, --help, or stray non-option arguments: print usage and quit.
        if (args.length == 0 || options.has("help") || options.nonOptionArguments().size() > 0) {
            System.out.println(HELP_TEXT);
            return;
        }

        if (options.has("debuglog")) {
            BriefLogFormatter.init();
            log.info("Starting up ...");
        } else {
            // Disable logspam unless there is a flag.
            logger = LogManager.getLogManager().getLogger("");
            logger.setLevel(Level.SEVERE);
        }

        // Select network parameters and the default chain file name per network.
        switch (netFlag.value(options)) {
            case PROD:
                params = MainNetParams.get();
                chainFileName = new File("prodnet.chain");
                break;
            case TEST:
                params = TestNet3Params.get();
                chainFileName = new File("testnet.chain");
                break;
            case REGTEST:
                params = RegTestParams.get();
                chainFileName = new File("regtest.chain");
                break;
            default:
                throw new RuntimeException("Unreachable.");
        }
        mode = modeFlag.value(options);

        // Allow the user to override the name of the chain used.
        if (options.has(chainFlag)) {
            chainFileName = new File(chainFlag.value(options));
        }
        if (options.has("condition")) {
            condition = new Condition(conditionFlag.value(options));
        }

        if (options.has(passwordFlag)) {
            password = passwordFlag.value(options);
        }

        ActionEnum action = ActionEnum.NONE;
        if (options.has(actionFlag))
            action = actionFlag.value(options);
        walletFile = new File(walletFileName.value(options));
        if (action == ActionEnum.CREATE) {
            createWallet(options, params, walletFile);
            return;  // We're done.
        }
        if (!walletFile.exists()) {
            System.err.println("Specified wallet file " + walletFile + " does not exist. Try --action=CREATE");
            return;
        }

        if (action == ActionEnum.RAW_DUMP) {
            // Just parse the protobuf and print, then bail out. Don't try and do a real deserialization. This is
            // useful mostly for investigating corrupted wallets.
            FileInputStream stream = new FileInputStream(walletFile);
            try {
                Protos.Wallet proto = WalletProtobufSerializer.parseToProto(stream);
                System.out.println(proto.toString());
                return;
            } finally {
                stream.close();
            }
        }

        // Load the wallet proper and sanity-check it against the chosen network.
        try {
            WalletProtobufSerializer loader = new WalletProtobufSerializer();
            wallet = loader.readWallet(new BufferedInputStream(new FileInputStream(walletFile)));
            if (!wallet.getParams().equals(params)) {
                System.err.println("Wallet does not match requested network parameters: " +
                        wallet.getParams().getId() + " vs " + params.getId());
                return;
            }
        } catch (Exception e) {
            System.err.println("Failed to load wallet '" + walletFile + "': " + e.getMessage());
            e.printStackTrace();
            return;
        }

        // What should we do?
        switch (action) {
            case DUMP: dumpWallet(); break;
            case ADD_KEY: addKey(); break;
            case DELETE_KEY: deleteKey(); break;
            case RESET: reset(); break;
            case SYNC: syncChain(); break;
            case SEND:
                if (!options.has(outputFlag)) {
                    System.err.println("You must specify at least one --output=addr:value.");
                    return;
                }
                BigInteger fee = BigInteger.ZERO;
                if (options.has("fee")) {
                    fee = Utils.toNanoCoins((String)options.valueOf("fee"));
                }
                String lockTime = null;
                if (options.has("locktime")) {
                    lockTime = (String) options.valueOf("locktime");
                }
                boolean allowUnconfirmed = options.has("allow-unconfirmed");
                send(outputFlag.values(options), fee, lockTime, allowUnconfirmed);
                break;
        }

        if (!wallet.isConsistent()) {
            System.err.println("************** WALLET IS INCONSISTENT *****************");
            return;
        }

        saveWallet(walletFile);

        if (options.has(waitForFlag)) {
            WaitForEnum value;
            try {
                value = waitForFlag.value(options);
            } catch (Exception e) {
                System.err.println("Could not understand the --waitfor flag: Valid options are WALLET_TX, BLOCK, " +
                        "BALANCE and EVER");
                return;
            }
            wait(value);
            // Re-check consistency and re-save, since waiting processes live events.
            if (!wallet.isConsistent()) {
                System.err.println("************** WALLET IS INCONSISTENT *****************");
                return;
            }
            saveWallet(walletFile);
        }

        shutdown();
    }

    /**
     * Builds, completes, signs and broadcasts a transaction to the given outputs.
     * Each output spec is "destination:value"; destinations starting with "04" of
     * hex length 130/66 are treated as raw public keys, otherwise as addresses.
     */
    private static void send(List<String> outputs, BigInteger fee, String lockTimeStr, boolean allowUnconfirmed) {
        try {
            // Convert the input strings to outputs.
            Transaction t = new Transaction(params);
            for (String spec : outputs) {
                String[] parts = spec.split(":");
                if (parts.length != 2) {
                    System.err.println("Malformed output specification, must have two parts separated by :");
                    return;
                }
                String destination = parts[0];
                try {
                    BigInteger value = Utils.toNanoCoins(parts[1]);
                    if (destination.startsWith("04") && (destination.length() == 130 || destination.length() == 66)) {
                        // Treat as a raw public key.
                        BigInteger pubKey = new BigInteger(destination, 16);
                        ECKey key = new ECKey(null, pubKey);
                        t.addOutput(value, key);
                    } else {
                        // Treat as an address.
                        Address addr = new Address(params, destination);
                        t.addOutput(value, addr);
                    }
                } catch (WrongNetworkException e) {
                    System.err.println("Malformed output specification, address is for a different network: " + parts[0]);
                    return;
                } catch (AddressFormatException e) {
                    System.err.println("Malformed output specification, could not parse as address: " + parts[0]);
                    return;
                } catch (NumberFormatException e) {
                    // NOTE(review): unlike the branches above this does NOT return — the
                    // malformed output is silently skipped and the send continues with the
                    // remaining outputs. Confirm this is intended.
                    System.err.println("Malformed output specification, could not parse as value: " + parts[1]);
                }
            }
            Wallet.SendRequest req = Wallet.SendRequest.forTx(t);
            // A single output for the exact balance means "empty the wallet".
            if (t.getOutputs().size() == 1 && t.getOutput(0).getValue().equals(wallet.getBalance())) {
                log.info("Emptying out wallet, recipient may get less than what you expect");
                req.emptyWallet = true;
            }
            req.fee = fee;
            if (allowUnconfirmed) {
                wallet.allowSpendingUnconfirmedTransactions();
            }
            if (password != null) {
                if (!wallet.checkPassword(password)) {
                    System.err.println("Password is incorrect.");
                    return;
                }
                req.aesKey = wallet.getKeyCrypter().deriveKey(password);
            }
            if (!wallet.completeTx(req)) {
                System.err.println("Insufficient funds: have " + Utils.bitcoinValueToFriendlyString(wallet.getBalance()));
                return;
            }
            try {
                if (lockTimeStr != null) {
                    t.setLockTime(Transaction.parseLockTimeStr(lockTimeStr));
                    // For lock times to take effect, at least one output must have a non-final sequence number.
                    t.getInputs().get(0).setSequenceNumber(0);
                    // And because we modified the transaction after it was completed, we must re-sign the inputs.
                    t.signInputs(Transaction.SigHash.ALL, wallet);
                }
            } catch (ParseException e) {
                System.err.println("Could not understand --locktime of " + lockTimeStr);
                return;
            } catch (ScriptException e) {
                throw new RuntimeException(e);
            } catch (KeyCrypterException e) {
                throw new RuntimeException(e);
            }
            t = req.tx;   // Not strictly required today.
            setup();
            peers.startAndWait();
            // Wait for peers to connect, the tx to be sent to one of them and for it to be propagated across the
            // network. Once propagation is complete and we heard the transaction back from all our peers, it will
            // be committed to the wallet.
            peers.broadcastTransaction(t).get();
            if (peers.getMinBroadcastConnections() == 1) {
                // Crap hack to work around some issue with Netty where the write future
                // completes before the remote peer actually hears the message.
                Thread.sleep(5000);
            }
            System.out.println(t.getHashAsString());
        } catch (BlockStoreException e) {
            throw new RuntimeException(e);
        } catch (KeyCrypterException e) {
            throw new RuntimeException(e);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Brings the network up and blocks until the given condition is met (or forever
     * for EVER). Listeners count down a latch from peer threads; the main thread
     * awaits it after starting the peer group.
     */
    private static void wait(WaitForEnum waitFor) throws BlockStoreException {
        final CountDownLatch latch = new CountDownLatch(1);
        setup();
        switch (waitFor) {
            case EVER:
                // No listener — the latch is never counted down, so await() blocks forever.
                break;

            case WALLET_TX:
                wallet.addEventListener(new AbstractWalletEventListener() {
                    private void handleTx(Transaction tx) {
                        System.out.println(tx.getHashAsString());
                        latch.countDown();  // Wake up main thread.
                    }

                    @Override
                    public void onCoinsReceived(Wallet wallet, Transaction tx, BigInteger prevBalance,
                                                BigInteger newBalance) {
                        // Runs in a peer thread.
                        super.onCoinsReceived(wallet, tx, prevBalance, newBalance);
                        handleTx(tx);
                    }

                    @Override
                    public void onCoinsSent(Wallet wallet, Transaction tx, BigInteger prevBalance,
                                            BigInteger newBalance) {
                        // Runs in a peer thread.
                        super.onCoinsSent(wallet, tx, prevBalance, newBalance);
                        handleTx(tx);
                    }
                });
                break;

            case BLOCK:
                peers.addEventListener(new AbstractPeerEventListener() {
                    @Override
                    public void onBlocksDownloaded(Peer peer, Block block, int blocksLeft) {
                        super.onBlocksDownloaded(peer, block, blocksLeft);
                        // Check if we already ran. This can happen if a block being received triggers download of more
                        // blocks, or if we receive another block whilst the peer group is shutting down.
                        if (latch.getCount() == 0) return;
                        System.out.println(block.getHashAsString());
                        latch.countDown();
                    }
                });
                break;

            case BALANCE:
                // Check if the balance already meets the given condition.
                if (condition.matchBitcoins(wallet.getBalance(Wallet.BalanceType.ESTIMATED))) {
                    latch.countDown();
                    break;
                }
                wallet.addEventListener(new AbstractWalletEventListener() {
                    @Override
                    public synchronized void onChange() {
                        super.onChange();
                        saveWallet(walletFile);
                        BigInteger balance = wallet.getBalance(Wallet.BalanceType.ESTIMATED);
                        if (condition.matchBitcoins(balance)) {
                            System.out.println(Utils.bitcoinValueToFriendlyString(balance));
                            latch.countDown();
                        }
                    }
                });
                break;
        }
        peers.start();
        try {
            latch.await();
        } catch (InterruptedException e) {
            // NOTE(review): interruption is swallowed and the interrupt flag is not
            // restored — the method simply returns as if the condition were met.
        }
    }

    /** Deletes all transactions from the wallet and saves it. */
    private static void reset() {
        // Delete the transactions and save. In future, reset the chain head pointer.
        wallet.clearTransactions(0);
        saveWallet(walletFile);
    }

    /**
     * Sets up all objects needed for network communication (block store, chain,
     * peer group, peer addresses/discovery) but does not bring up the peers.
     * Idempotent: returns immediately if already done.
     */
    private static void setup() throws BlockStoreException {
        if (store != null) return;  // Already done.
        // Will create a fresh chain if one doesn't exist or there is an issue with this one.
        if (!chainFileName.exists() && wallet.getTransactions(true).size() > 0) {
            // No chain, so reset the wallet as we will be downloading from scratch.
            System.out.println("Chain file is missing so clearing transactions from the wallet.");
            reset();
        }
        if (mode == ValidationMode.SPV) {
            store = new SPVBlockStore(params, chainFileName);
            chain = new BlockChain(params, wallet, store);
        } else if (mode == ValidationMode.FULL) {
            FullPrunedBlockStore s = new H2FullPrunedBlockStore(params, chainFileName.getAbsolutePath(), 5000);
            store = s;
            chain = new FullPrunedBlockChain(params, wallet, s);
        }
        // This will ensure the wallet is saved when it changes.
        wallet.autosaveToFile(walletFile, 200, TimeUnit.MILLISECONDS, null);
        peers = new PeerGroup(params, chain);
        peers.setUserAgent("WalletTool", "1.0");
        peers.addWallet(wallet);
        if (options.has("peers")) {
            // Explicit peer list overrides DNS discovery.
            String peersFlag = (String) options.valueOf("peers");
            String[] peerAddrs = peersFlag.split(",");
            for (String peer : peerAddrs) {
                try {
                    peers.addAddress(new PeerAddress(InetAddress.getByName(peer), params.getPort()));
                } catch (UnknownHostException e) {
                    System.err.println("Could not understand peer domain name/IP address: " + peer + ": " + e.getMessage());
                    System.exit(1);
                }
            }
        } else {
            peers.addPeerDiscovery(new DnsDiscovery(params));
        }
    }

    /** Downloads the block chain and reports how many new wallet transactions arrived. */
    private static void syncChain() {
        try {
            setup();
            int startTransactions = wallet.getTransactions(true).size();
            DownloadListener listener = new DownloadListener();
            peers.startAndWait();
            peers.startBlockChainDownload(listener);
            try {
                listener.await();
            } catch (InterruptedException e) {
                System.err.println("Chain download interrupted, quitting ...");
                System.exit(1);
            }
            int endTransactions = wallet.getTransactions(true).size();
            if (endTransactions > startTransactions) {
                System.out.println("Synced " + (endTransactions - startTransactions) + " transactions.");
            }
        } catch (BlockStoreException e) {
            System.err.println("Error reading block chain file " + chainFileName + ": " + e.getMessage());
            e.printStackTrace();
        }
    }

    /** Stops the peer group, saves the wallet and closes the block store. */
    private static void shutdown() {
        try {
            if (peers == null) return;  // setup() never called so nothing to do.
            peers.stopAndWait();
            saveWallet(walletFile);
            store.close();
            wallet = null;
        } catch (BlockStoreException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates a fresh wallet file, refusing to overwrite an existing one unless
     * --force is given; encrypts it and adds one encrypted key when a password is set.
     */
    private static void createWallet(OptionSet options, NetworkParameters params, File walletFile) throws IOException {
        if (walletFile.exists() && !options.has("force")) {
            System.err.println("Wallet creation requested but " + walletFile + " already exists, use --force");
            return;
        }
        wallet = new Wallet(params);
        if (password != null) {
            wallet.encrypt(password);
            wallet.addNewEncryptedKey(password);
        }
        wallet.saveToFile(walletFile);
    }

    /** Saves the wallet; exits the process on failure (the old file is left intact). */
    private static void saveWallet(File walletFile) {
        try {
            // This will save the new state of the wallet to a temp file then rename, in case anything goes wrong.
            wallet.saveToFile(walletFile);
        } catch (IOException e) {
            System.err.println("Failed to save wallet! Old wallet should be left untouched.");
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Adds a key to the wallet: from --privkey (dumped or hex/base58), from
     * --pubkey (watch-only), or freshly generated. Creation time comes from
     * --unixtime or --date. Encrypts the key first if the wallet is encrypted.
     */
    private static void addKey() {
        ECKey key;
        long creationTimeSeconds = 0;
        if (options.has(unixtimeFlag)) {
            creationTimeSeconds = unixtimeFlag.value(options);
        } else if (options.has(dateFlag)) {
            creationTimeSeconds = dateFlag.value(options).getTime() / 1000;
        }
        if (options.has("privkey")) {
            String data = (String) options.valueOf("privkey");
            // A leading 'L' means the sipa dumped-private-key (WIF) format.
            if (data.charAt(0) == 'L') {
                DumpedPrivateKey dpk;
                try {
                    dpk = new DumpedPrivateKey(params, data);
                } catch (AddressFormatException e) {
                    System.err.println("Could not parse dumped private key " + data);
                    return;
                }
                key = dpk.getKey();
            } else {
                byte[] decode = Utils.parseAsHexOrBase58(data);
                if (decode == null) {
                    System.err.println("Could not understand --privkey as either hex or base58: " + data);
                    return;
                }
                key = new ECKey(new BigInteger(1, decode));
            }
            if (options.has("pubkey")) {
                // Give the user a hint.
                System.out.println("You don't have to specify --pubkey when a private key is supplied.");
            }
            key.setCreationTimeSeconds(creationTimeSeconds);
        } else if (options.has("pubkey")) {
            byte[] pubkey = Utils.parseAsHexOrBase58((String) options.valueOf("pubkey"));
            key = new ECKey(null, pubkey);
            key.setCreationTimeSeconds(creationTimeSeconds);
        } else {
            // Freshly generated key.
            key = new ECKey();
            if (creationTimeSeconds > 0)
                key.setCreationTimeSeconds(creationTimeSeconds);
        }
        if (wallet.findKeyFromPubKey(key.getPubKey()) != null) {
            System.err.println("That key already exists in this wallet.");
            return;
        }
        try {
            if (wallet.isEncrypted()) {
                if (password == null || !wallet.checkPassword(password)) {
                    System.err.println("The password is incorrect.");
                    return;
                }
                key = key.encrypt(wallet.getKeyCrypter(), wallet.getKeyCrypter().deriveKey(password));
            }
            wallet.addKey(key);
        } catch (KeyCrypterException kce) {
            System.err.println("There was an encryption related error when adding the key. The error was '"
                    + kce.getMessage() + "'.");
        }
        System.out.println(key.toAddress(params) + " " + key);
    }

    /** Removes the key identified by --pubkey (hex) or --addr from the wallet. */
    private static void deleteKey() {
        String pubkey = (String) options.valueOf("pubkey");
        String addr = (String) options.valueOf("addr");
        if (pubkey == null && addr == null) {
            System.err.println("One of --pubkey or --addr must be specified.");
            return;
        }
        ECKey key = null;
        if (pubkey != null) {
            key = wallet.findKeyFromPubKey(Hex.decode(pubkey));
        } else {
            try {
                Address address = new Address(wallet.getParams(), addr);
                key = wallet.findKeyFromPubHash(address.getHash160());
            } catch (AddressFormatException e) {
                System.err.println(addr + " does not parse as a Bitcoin address of the right network parameters.");
                return;
            }
        }
        if (key == null) {
            System.err.println("Wallet does not seem to contain that key.");
            return;
        }
        wallet.removeKey(key);
    }

    /** Prints the wallet in human-readable form, including chain info when available. */
    private static void dumpWallet() throws BlockStoreException {
        // Setup to get the chain height so we can estimate lock times, but don't wipe the transactions if it's not
        // there just for the dump case.
        if (chainFileName.exists())
            setup();
        System.out.println(wallet.toString(true, true, true, chain));
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hop.pipeline.transforms.getfilenames;

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileType;
import org.apache.hop.core.Const;
import org.apache.hop.core.ResultFile;
import org.apache.hop.core.exception.HopException;
import org.apache.hop.core.exception.HopTransformException;
import org.apache.hop.core.fileinput.FileInputList;
import org.apache.hop.core.row.RowDataUtil;
import org.apache.hop.core.row.RowMeta;
import org.apache.hop.core.util.Utils;
import org.apache.hop.core.vfs.HopVfs;
import org.apache.hop.i18n.BaseMessages;
import org.apache.hop.pipeline.Pipeline;
import org.apache.hop.pipeline.PipelineMeta;
import org.apache.hop.pipeline.transform.BaseTransform;
import org.apache.hop.pipeline.transform.ITransform;
import org.apache.hop.pipeline.transform.TransformMeta;

import java.io.IOException;
import java.util.Date;
import java.util.List;

/**
 * Emits one output row of file metadata (name, path, type, size, timestamps, ...)
 * per matched file. Operates in two modes: a static file list configured on the
 * transform, or a dynamic mode where filename/wildcard fields are read from
 * incoming rows (meta.isFileField()).
 *
 * @author Matt
 * @since 4-apr-2003
 */
public class GetFileNames extends BaseTransform<GetFileNamesMeta, GetFileNamesData> implements ITransform<GetFileNamesMeta, GetFileNamesData> {

  private static final Class<?> PKG = GetFileNamesMeta.class; // For Translator

  public GetFileNames( TransformMeta transformMeta, GetFileNamesMeta meta, GetFileNamesData data, int copyNr,
                       PipelineMeta pipelineMeta, Pipeline pipeline ) {
    super( transformMeta, meta, data, copyNr, pipelineMeta, pipeline );
  }

  /**
   * Build an empty row based on the meta-data...
   *
   * @return an Object[] sized to the output row meta
   */
  private Object[] buildEmptyRow() {
    Object[] rowData = RowDataUtil.allocateRowData( data.outputRowMeta.size() );

    return rowData;
  }

  /**
   * Processes one output row per call. data.filenr/data.filessize form a cursor
   * over the current file list; in dynamic mode the list is (re)built from each
   * incoming row once the previous list is exhausted.
   *
   * @return true while more rows may follow, false when done
   */
  public boolean processRow() throws HopException {
    if ( !meta.isFileField() ) {
      // Static mode: the file list was built in init(); stop when exhausted.
      if ( data.filenr >= data.filessize ) {
        setOutputDone();
        return false;
      }
    } else {
      // Dynamic mode: fetch a new input row once the current file list is used up.
      if ( data.filenr >= data.filessize ) {
        // Grab one row from previous transform ...
        data.readrow = getRow();
      }

      if ( data.readrow == null ) {
        setOutputDone();
        return false;
      }

      if ( first ) {
        first = false;

        data.inputRowMeta = getInputRowMeta();
        data.outputRowMeta = data.inputRowMeta.clone();
        meta.getFields( data.outputRowMeta, getTransformName(), null, null, this, metadataProvider );

        // Get total previous fields
        data.totalpreviousfields = data.inputRowMeta.size();

        // Check is filename field is provided
        if ( Utils.isEmpty( meta.getDynamicFilenameField() ) ) {
          logError( BaseMessages.getString( PKG, "GetFileNames.Log.NoField" ) );
          throw new HopException( BaseMessages.getString( PKG, "GetFileNames.Log.NoField" ) );
        }

        // cache the position of the field
        if ( data.indexOfFilenameField < 0 ) {
          data.indexOfFilenameField = data.inputRowMeta.indexOfValue( meta.getDynamicFilenameField() );
          if ( data.indexOfFilenameField < 0 ) {
            // The field is unreachable !
            logError( BaseMessages.getString( PKG, "GetFileNames.Log.ErrorFindingField", meta.getDynamicFilenameField() ) );
            throw new HopException( BaseMessages.getString(
              PKG, "GetFileNames.Exception.CouldnotFindField", meta.getDynamicFilenameField() ) );
          }
        }

        // If wildcard field is specified, Check if field exists
        if ( !Utils.isEmpty( meta.getDynamicWildcardField() ) ) {
          if ( data.indexOfWildcardField < 0 ) {
            data.indexOfWildcardField = data.inputRowMeta.indexOfValue( meta.getDynamicWildcardField() );
            if ( data.indexOfWildcardField < 0 ) {
              // The field is unreachable !
              logError( BaseMessages.getString( PKG, "GetFileNames.Log.ErrorFindingField" )
                + "[" + meta.getDynamicWildcardField() + "]" );
              throw new HopException( BaseMessages.getString(
                PKG, "GetFileNames.Exception.CouldnotFindField", meta.getDynamicWildcardField() ) );
            }
          }
        }
        // If ExcludeWildcard field is specified, Check if field exists
        if ( !Utils.isEmpty( meta.getDynamicExcludeWildcardField() ) ) {
          if ( data.indexOfExcludeWildcardField < 0 ) {
            data.indexOfExcludeWildcardField = data.inputRowMeta.indexOfValue( meta.getDynamicExcludeWildcardField() );
            if ( data.indexOfExcludeWildcardField < 0 ) {
              // The field is unreachable !
              logError( BaseMessages.getString( PKG, "GetFileNames.Log.ErrorFindingField" )
                + "[" + meta.getDynamicExcludeWildcardField() + "]" );
              throw new HopException( BaseMessages.getString(
                PKG, "GetFileNames.Exception.CouldnotFindField", meta.getDynamicExcludeWildcardField() ) );
            }
          }
        }
      }
    } // end if first

    try {
      Object[] outputRow = buildEmptyRow();
      int outputIndex = 0;
      Object[] extraData = new Object[ data.nrTransformFields ];
      if ( meta.isFileField() ) {
        if ( data.filenr >= data.filessize ) {
          // Get value of dynamic filename field ...
          String filename = getInputRowMeta().getString( data.readrow, data.indexOfFilenameField );
          String wildcard = "";
          if ( data.indexOfWildcardField >= 0 ) {
            wildcard = getInputRowMeta().getString( data.readrow, data.indexOfWildcardField );
          }
          String excludewildcard = "";
          if ( data.indexOfExcludeWildcardField >= 0 ) {
            excludewildcard = getInputRowMeta().getString( data.readrow, data.indexOfExcludeWildcardField );
          }

          String[] filesname = { filename };
          String[] filesmask = { wildcard };
          String[] excludefilesmask = { excludewildcard };
          String[] filesrequired = { "N" };
          boolean[] includesubfolders = { meta.isDynamicIncludeSubFolders() };
          // Get files list
          data.files = meta.getDynamicFileList( this, filesname, filesmask, excludefilesmask, filesrequired, includesubfolders );
          data.filessize = data.files.nrOfFiles();
          data.filenr = 0;
        }

        // Clone current input row
        outputRow = data.readrow.clone();
      }
      if ( data.filessize > 0 ) {
        data.file = data.files.getFile( data.filenr );

        if ( meta.isAddResultFile() ) {
          // Add this to the result file names...
          ResultFile resultFile =
            new ResultFile( ResultFile.FILE_TYPE_GENERAL, data.file, getPipelineMeta().getName(), getTransformName() );
          resultFile.setComment( BaseMessages.getString( PKG, "GetFileNames.Log.FileReadByTransform" ) );
          addResultFile( resultFile );
        }

        // The positional order of the extraData values below must match the field
        // order declared by meta.getFields() — do not reorder.

        // filename
        extraData[ outputIndex++ ] = HopVfs.getFilename( data.file );

        // short_filename
        extraData[ outputIndex++ ] = data.file.getName().getBaseName();

        try {
          // Path
          // NOTE(review): getParent() can be null for a filesystem root — confirm
          // HopVfs.getFilename tolerates that.
          extraData[ outputIndex++ ] = HopVfs.getFilename( data.file.getParent() );

          // type
          extraData[ outputIndex++ ] = data.file.getType().toString();

          // exists
          extraData[ outputIndex++ ] = Boolean.valueOf( data.file.exists() );

          // ishidden
          extraData[ outputIndex++ ] = Boolean.valueOf( data.file.isHidden() );

          // isreadable
          extraData[ outputIndex++ ] = Boolean.valueOf( data.file.isReadable() );

          // iswriteable
          extraData[ outputIndex++ ] = Boolean.valueOf( data.file.isWriteable() );

          // lastmodifiedtime
          extraData[ outputIndex++ ] = new Date( data.file.getContent().getLastModifiedTime() );

          // size
          // Only regular files have a size; folders get null.
          Long size = null;
          if ( data.file.getType().equals( FileType.FILE ) ) {
            size = new Long( data.file.getContent().getSize() ); // NOTE(review): new Long(...) is deprecated; Long.valueOf preferred.
          }
          extraData[ outputIndex++ ] = size;

        } catch ( IOException e ) {
          throw new HopException( e );
        }

        // extension
        extraData[ outputIndex++ ] = data.file.getName().getExtension();

        // uri
        extraData[ outputIndex++ ] = data.file.getName().getURI();

        // rooturi
        extraData[ outputIndex++ ] = data.file.getName().getRootURI();

        // See if we need to add the row number to the row...
        if ( meta.includeRowNumber() && !Utils.isEmpty( meta.getRowNumberField() ) ) {
          extraData[ outputIndex++ ] = new Long( data.rownr );
        }

        data.rownr++;
        // Add row data
        outputRow = RowDataUtil.addRowData( outputRow, data.totalpreviousfields, extraData );
        // Send row
        putRow( data.outputRowMeta, outputRow );

        if ( meta.getRowLimit() > 0 && data.rownr >= meta.getRowLimit() ) { // limit has been reached: stop now.
          setOutputDone();
          return false;
        }
      }
    } catch ( Exception e ) {
      throw new HopTransformException( e );
    }

    data.filenr++;

    if ( checkFeedback( getLinesInput() ) ) {
      if ( log.isBasic() ) {
        logBasic( BaseMessages.getString( PKG, "GetFileNames.Log.NrLine", "" + getLinesInput() ) );
      }
    }

    return true;
  }

  /**
   * Validates the current file list: logs and returns when an empty list is
   * explicitly allowed, otherwise throws for missing or inaccessible required files.
   */
  private void handleMissingFiles() throws HopException {
    if ( meta.isdoNotFailIfNoFile() && data.files.nrOfFiles() == 0 ) {
      logBasic( BaseMessages.getString( PKG, "GetFileNames.Log.NoFile" ) );
      return;
    }
    List<FileObject> nonExistantFiles = data.files.getNonExistantFiles();

    if ( nonExistantFiles.size() != 0 ) {
      String message = FileInputList.getRequiredFilesDescription( nonExistantFiles );
      logBasic( "ERROR: Missing " + message );
      throw new HopException( "Following required files are missing: " + message );
    }

    List<FileObject> nonAccessibleFiles = data.files.getNonAccessibleFiles();
    if ( nonAccessibleFiles.size() != 0 ) {
      String message = FileInputList.getRequiredFilesDescription( nonAccessibleFiles );
      logBasic( "WARNING: Not accessible " + message );
      throw new HopException( "Following required files are not accessible: " + message );
    }
  }

  /**
   * Initializes output row metadata and, in static mode, resolves the configured
   * file list up front (failing fast on missing/inaccessible required files).
   */
  public boolean init(){

    if ( super.init() ) {
      try {
        // Create the output row meta-data
        data.outputRowMeta = new RowMeta();
        meta.getFields( data.outputRowMeta, getTransformName(), null, null, this, metadataProvider ); // get the
        // metadata
        // populated

        data.nrTransformFields = data.outputRowMeta.size();

        if ( !meta.isFileField() ) {
          // Static mode: resolve the whole file list now.
          data.files = meta.getFileList( this );
          data.filessize = data.files.nrOfFiles();
          handleMissingFiles();
        } else {
          // Dynamic mode: lists are built per incoming row in processRow().
          data.filessize = 0;
        }

      } catch ( Exception e ) {
        logError( "Error initializing transform: " + e.toString() );
        logError( Const.getStackTracker( e ) );
        return false;
      }

      data.rownr = 1L;
      data.filenr = 0;
      data.totalpreviousfields = 0;

      return true;
    }
    return false;
  }

  /** Closes the currently open file object (best-effort) before delegating to the base class. */
  public void dispose(){
    if ( data.file != null ) {
      try {
        data.file.close();
        data.file = null;
      } catch ( Exception e ) {
        // Ignore close errors
      }
    }

    super.dispose();
  }
}
/*
 * Corona-Warn-App
 *
 * SAP SE and all other contributors /
 * copyright owners license this file to you under the Apache
 * License, Version 2.0 (the "License"); you may not use this
 * file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package app.coronawarn.server.services.distribution.objectstore.publish;

import java.nio.file.Path;

/**
 * Represents a file of a specific category: Index files.
 * <br>
 * Index files contain information about the available packages on the S3, which makes discovery of
 * those files easier for the consumers. Index files are assembled with the name "index", but should
 * be published on S3 w/o the index part, e.g.:
 * <br>
 * /diagnosis-keys/date/2020-12-12/index -> /diagnosis-keys/date/2020-12-12
 */
public class LocalIndexFile extends LocalFile {

  /**
   * the suffix for index files.
   */
  private static final String INDEX_FILE_SUFFIX = "/index";

  /**
   * Constructs a new file, which is treated as an index file.
   *
   * @param file     the file on the disk
   * @param basePath the base path, from where the file was loaded. This will be used in order to determine the S3 key
   */
  public LocalIndexFile(Path file, Path basePath) {
    super(file, basePath);
  }

  /**
   * Computes the S3 key and strips the trailing {@code /index} suffix, so that
   * {@code .../2020-12-12/index} is published as {@code .../2020-12-12}.
   *
   * @param file       the file on the disk
   * @param rootFolder the base folder used to relativize the key
   * @return the S3 key without the index suffix
   */
  @Override
  protected String createS3Key(Path file, Path rootFolder) {
    String s3Key = super.createS3Key(file, rootFolder);
    // Guard against keys that do not actually end with "/index": the previous
    // unconditional substring would silently truncate the last 6 characters.
    if (s3Key.endsWith(INDEX_FILE_SUFFIX)) {
      return s3Key.substring(0, s3Key.length() - INDEX_FILE_SUFFIX.length());
    }
    return s3Key;
  }
}
package com.ace.service.api;

import com.ace.controller.api.concerns.Query;
import com.ace.entity.Account;
import com.ace.entity.Room;
import com.ace.entity.RoomReport;
import com.ace.entity.Schedule;

import java.sql.Date;
import java.util.List;

/**
 * Service-layer operations for looking up rooms and their schedules.
 */
public interface RoomService {

    /**
     * Lists the rooms visible to the given account, filtered by the query.
     *
     * @param account the account performing the lookup
     * @param query   filtering/paging parameters supplied by the controller
     * @return the matching rooms
     */
    List<Room> query(Account account, Query query);

    /**
     * Looks up a single room by its id.
     *
     * @param id the room id
     * @return the room for the given id
     */
    Room show(Long id);

    /**
     * Returns the schedules of the given room on the given date.
     *
     * @param room the room id
     * @param date the day to look up
     * @return the schedules for that room and date
     */
    List<Schedule> schedule(Long room, Date date);

    /**
     * Returns the protocol string for the given room.
     * NOTE(review): exact semantics of the returned value are implementation
     * defined and not visible here — confirm against the implementing class.
     *
     * @param room the room id
     * @return the room's protocol string
     */
    String protocol(Long room);
}
/*
 * Copyright 2009-2017 Aarhus University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package dk.brics.tajs.monitoring;

/**
 * Analysis phases, in the order they are executed.
 */
public enum AnalysisPhase {

    /** Loading of the initial files, and setup of the initial state. */
    INITIALIZATION,

    /** The actual fix-point solving phase. */
    ANALYSIS,

    /** The scanning phase; data should be collected here. */
    SCAN
}
/* * The MIT License (MIT) * * Copyright (c) 2016-2020, Hamdi Douss * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom * the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES * OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE * OR OTHER DEALINGS IN THE SOFTWARE. */ /** * Scalars whose definitions and actual values depend on conditions. */ package com.aljebra.scalar.condition;
/* * MovieAttributes.java * Transform * * Copyright (c) 2009-2010 Flagstone Software Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of Flagstone Software Ltd. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.flagstone.transform; import java.io.IOException; import com.flagstone.transform.coder.Coder; import com.flagstone.transform.coder.Context; import com.flagstone.transform.coder.SWFDecoder; import com.flagstone.transform.coder.SWFEncoder; /** * The MovieAttributes tag defines characteristics of a Movie. 
It contains
 * several flags to indicate types of objects in the movie and whether any
 * hardware acceleration should be used if available.
 *
 * For Flash Version 8 and above it must be the first object after the
 * MovieHeader.
 */
public final class MovieAttributes implements MovieTag {

    /** Format string used in toString() method. */
    private static final String FORMAT = "MovieAttributes: {"
            + " metadata=%b; as3=%b; network=%b; gpu=%b; directBlit=%b}";

    /** The set of encoded attributes. */
    private transient int attributes;

    /**
     * Creates a new MovieAttributes object.
     */
    public MovieAttributes() {
        // Empty
    }

    /**
     * Creates and initialises a MovieAttributes object using values encoded
     * in the Flash binary format.
     *
     * @param coder
     *            an SWFDecoder object that contains the encoded Flash data.
     *
     * @throws IOException
     *             if an error occurs while decoding the data.
     */
    public MovieAttributes(final SWFDecoder coder) throws IOException {
        int length = coder.readUnsignedShort() & Coder.LENGTH_FIELD;
        if (length == Coder.IS_EXTENDED) {
            length = coder.readInt();
        }
        attributes = coder.readByte();
        // Any remaining bytes of the tag body are reserved; skip them.
        coder.skip(length - 1);
    }

    /**
     * Creates and initialises a MovieAttributes object using the values copied
     * from another MovieAttributes object.
     *
     * @param object
     *            a MovieAttributes object from which the values will be
     *            copied.
     */
    public MovieAttributes(final MovieAttributes object) {
        attributes = object.attributes;
    }

    /**
     * Does the Movie contain meta-data.
     *
     * @return true if the movie contains a MovieMetaData tag.
     */
    public boolean hasMetaData() {
        return (attributes & Coder.BIT4) != 0;
    }

    /**
     * Does the Movie contain Actionscript 3 code.
     *
     * @return true if the movie contains at least one DoABC tag
     * containing Actionscript 3 byte-codes.
     */
    public boolean hasAS3() {
        return (attributes & Coder.BIT3) != 0;
    }

    /**
     * Does the Flash Player use direct bit block transfer to accelerate
     * graphics.
     *
     * @return true if the Flash Player will use direct bit block transfer.
     */
    public boolean useDirectBlit() {
        return (attributes & Coder.BIT6) != 0;
    }

    /**
     * Instruct the Flash Player to use direct bit block transfer to accelerate
     * graphics.
     *
     * @param useBlit use graphics hardware accelerations.
     */
    public void setUseDirectBlit(final boolean useBlit) {
        if (useBlit) {
            attributes |= Coder.BIT6;
        } else {
            attributes &= ~Coder.BIT6;
        }
    }

    /**
     * Does the Flash Player use the graphics processor to accelerate
     * compositing - if available.
     *
     * @return true if the Flash Player will use the graphics process for
     * compositing.
     */
    public boolean useGPU() {
        return (attributes & Coder.BIT5) != 0;
    }

    /**
     * Instruct the Flash Player to use the graphics processor to accelerate
     * compositing - if available.
     *
     * @param useGPU use graphics processor for compositing.
     */
    public void setUseGPU(final boolean useGPU) {
        if (useGPU) {
            attributes |= Coder.BIT5;
        } else {
            attributes &= ~Coder.BIT5;
        }
    }

    /**
     * Does the Flash Player use the network for loading resources even if the
     * movie is loaded from the local file system.
     *
     * @return true if the network will be used even if the movie is loaded
     * locally, false otherwise.
     */
    public boolean useNetwork() {
        return (attributes & Coder.BIT0) != 0;
    }

    /**
     * Instruct the Flash Player to use the network for loading resources even
     * if the movie is loaded from the local file system.
     *
     * @param useNetwork use the network even if the movie is loaded locally.
     */
    public void setUseNetwork(final boolean useNetwork) {
        if (useNetwork) {
            attributes |= Coder.BIT0;
        } else {
            attributes &= ~Coder.BIT0;
        }
    }

    /** {@inheritDoc} */
    public MovieAttributes copy() {
        return new MovieAttributes(this);
    }

    /** {@inheritDoc} */
    @Override
    public String toString() {
        return String.format(FORMAT, hasMetaData(), hasAS3(),
                useNetwork(), useGPU(), useDirectBlit());
    }

    /** {@inheritDoc} */
    public int prepareToEncode(final Context context) {
        // Fixed-size tag: 2-byte header + 1 attribute byte + 3 reserved bytes.
        // CHECKSTYLE IGNORE MagicNumberCheck FOR NEXT 1 LINES
        return 6;
    }

    /** {@inheritDoc} */
    public void encode(final SWFEncoder coder, final Context context)
            throws IOException {
        // CHECKSTYLE IGNORE MagicNumberCheck FOR NEXT 2 LINES
        coder.writeShort((MovieTypes.FILE_ATTRIBUTES
                << Coder.LENGTH_FIELD_SIZE) | 4);
        coder.writeByte(attributes);
        coder.writeByte(0);
        coder.writeByte(0);
        coder.writeByte(0);
    }
}
package betterwithmods.proxy;

import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.Item;
import net.minecraft.world.World;
import net.minecraftforge.fml.common.network.IGuiHandler;

/**
 * Server-side (common) proxy. Rendering-related hooks are intentionally
 * no-ops here; the client proxy is expected to override them.
 */
public class CommonProxy implements IGuiHandler {

    /** No item-block model registration on the server; returns the item unchanged. */
    public Item addItemBlockModel(Item item) {
        return item;
    }

    /** No-op on the server; rendering is client-only. */
    public void registerRenderInformation() {
    }

    /** No-op on the server. */
    public void registerEvents() {
    }

    /** No-op on the server; renderers are client-only. */
    public void initRenderers() {
    }

    /** @return false — this is the common/server proxy. */
    public boolean isClientside() {
        return false;
    }

    /** No-op on the server; item/block colors are client-only. */
    public void registerColors() {
    }

    /** No server-side GUI element is provided; always returns null. */
    @Override
    public Object getServerGuiElement(int ID, EntityPlayer player, World world, int x, int y, int z) {
        return null;
    }

    /** No client GUI element in the common proxy; always returns null. */
    @Override
    public Object getClientGuiElement(int ID, EntityPlayer player, World world, int x, int y, int z) {
        return null;
    }
}
/** * Copyright 2018 ADLINK Technology Limited. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.zeligsoft.domain.omg.corba.ui.viewcustomizers; import org.eclipse.emf.common.util.EList; import org.eclipse.emf.ecore.EObject; import org.eclipse.gmf.runtime.notation.View; import com.zeligsoft.base.ui.viewcustomizers.BaseViewCustomizer; import com.zeligsoft.base.zdl.util.ZDLUtil; import com.zeligsoft.domain.omg.corba.CORBADomainNames; /** * * @author ysroh * */ public class CORBAInterfaceViewCustomizer extends BaseViewCustomizer { /** * The only instance of <code>CORBAInterfaceViewCustomizer</code> */ public static CORBAInterfaceViewCustomizer INSTANCE = new CORBAInterfaceViewCustomizer(); private CORBAInterfaceViewCustomizer() { // do not instantiate } /** * Sets the visible property for various compartments. * * @param view * the <code>Node</code> view for a <code>Component</code>. */ @Override @SuppressWarnings("unchecked") public void customizeView(View view) { super.customizeView(view); EObject element = view.getElement(); if (ZDLUtil.isZDLConcept(element, CORBADomainNames.CORBAINTERFACE) == true) { EList<View> children = view.getChildren(); for (View childView : children) { String childViewType = childView.getType(); if (ATTRIBUTE_LIST_VIEW_NAME.equals(childViewType)) { childView.setVisible(true); } } } } }
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.drill.exec.physical.impl.common; import java.io.IOException; import java.util.LinkedList; import java.util.List; import org.apache.drill.common.expression.ErrorCollector; import org.apache.drill.common.expression.ErrorCollectorImpl; import org.apache.drill.common.expression.LogicalExpression; import org.apache.drill.common.expression.FunctionCall; import org.apache.drill.common.expression.ExpressionPosition; import org.apache.drill.common.exceptions.DrillRuntimeException; import org.apache.drill.common.logical.data.NamedExpression; import org.apache.drill.common.types.TypeProtos; import org.apache.drill.common.types.TypeProtos.MinorType; import org.apache.drill.common.types.Types; import org.apache.drill.exec.compile.sig.GeneratorMapping; import org.apache.drill.exec.compile.sig.MappingSet; import org.apache.drill.exec.exception.ClassTransformationException; import org.apache.drill.exec.exception.SchemaChangeException; import org.apache.drill.exec.expr.ClassGenerator; import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer; import org.apache.drill.exec.expr.CodeGenerator; import org.apache.drill.exec.expr.ExpressionTreeMaterializer; import 
org.apache.drill.exec.expr.TypeHelper;
import org.apache.drill.exec.expr.ValueVectorReadExpression;
import org.apache.drill.exec.expr.ValueVectorWriteExpression;
import org.apache.drill.exec.expr.fn.FunctionGenerationHelper;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.RecordBatch;
import org.apache.drill.exec.record.TypedFieldId;
import org.apache.drill.exec.record.VectorContainer;
import org.apache.drill.exec.resolver.TypeCastRules;
import org.apache.drill.exec.vector.ValueVector;

import com.sun.codemodel.JConditional;
import com.sun.codemodel.JExpr;

/**
 * Factory for the runtime-generated {@link HashTable} implementation used by
 * hash-based operators. Generates code for key matching, value insertion,
 * hash computation and key output against the build (and optionally probe)
 * record batches.
 */
public class ChainedHashTable {
  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ChainedHashTable.class);

  // Generator mappings: each pairs a setup method with the eval method the
  // generated expression code is emitted into.
  private static final GeneratorMapping KEY_MATCH_BUILD =
      GeneratorMapping.create("setupInterior" /* setup method */, "isKeyMatchInternalBuild" /* eval method */,
          null /* reset */, null /* cleanup */);
  private static final GeneratorMapping KEY_MATCH_PROBE =
      GeneratorMapping.create("setupInterior" /* setup method */, "isKeyMatchInternalProbe" /* eval method */,
          null /* reset */, null /* cleanup */);
  private static final GeneratorMapping GET_HASH_BUILD =
      GeneratorMapping.create("doSetup" /* setup method */, "getHashBuild" /* eval method */,
          null /* reset */, null /* cleanup */);
  private static final GeneratorMapping GET_HASH_PROBE =
      GeneratorMapping.create("doSetup" /* setup method */, "getHashProbe" /* eval method */,
          null /* reset */, null /* cleanup */);
  private static final GeneratorMapping SET_VALUE =
      GeneratorMapping.create("setupInterior" /* setup method */, "setValue" /* eval method */,
          null /* reset */, null /* cleanup */);
  private static final GeneratorMapping OUTPUT_KEYS =
      GeneratorMapping.create("setupInterior" /* setup method */, "outputRecordKeys" /* eval method */,
          null /* reset */, null /* cleanup */);

  // GM for putting constant expression into method "setupInterior"
  private static final GeneratorMapping SETUP_INTERIOR_CONSTANT =
      GeneratorMapping.create("setupInterior" /* setup method */, "setupInterior" /* eval method */,
          null /* reset */, null /* cleanup */);

  // GM for putting constant expression into method "doSetup"
  private static final GeneratorMapping DO_SETUP_CONSTANT =
      GeneratorMapping.create("doSetup" /* setup method */, "doSetup" /* eval method */,
          null /* reset */, null /* cleanup */);

  // Mapping sets binding read/write indices and containers to the mappings above.
  private final MappingSet KeyMatchIncomingBuildMapping =
      new MappingSet("incomingRowIdx", null, "incomingBuild", null, SETUP_INTERIOR_CONSTANT, KEY_MATCH_BUILD);
  private final MappingSet KeyMatchIncomingProbeMapping =
      new MappingSet("incomingRowIdx", null, "incomingProbe", null, SETUP_INTERIOR_CONSTANT, KEY_MATCH_PROBE);
  private final MappingSet KeyMatchHtableMapping =
      new MappingSet("htRowIdx", null, "htContainer", null, SETUP_INTERIOR_CONSTANT, KEY_MATCH_BUILD);
  private final MappingSet KeyMatchHtableProbeMapping =
      new MappingSet("htRowIdx", null, "htContainer", null, SETUP_INTERIOR_CONSTANT, KEY_MATCH_PROBE);
  private final MappingSet GetHashIncomingBuildMapping =
      new MappingSet("incomingRowIdx", null, "incomingBuild", null, DO_SETUP_CONSTANT, GET_HASH_BUILD);
  private final MappingSet GetHashIncomingProbeMapping =
      new MappingSet("incomingRowIdx", null, "incomingProbe", null, DO_SETUP_CONSTANT, GET_HASH_PROBE);
  private final MappingSet SetValueMapping =
      new MappingSet("incomingRowIdx" /* read index */, "htRowIdx" /* write index */,
          "incomingBuild" /* read container */, "htContainer" /* write container */,
          SETUP_INTERIOR_CONSTANT, SET_VALUE);
  private final MappingSet OutputRecordKeysMapping =
      new MappingSet("htRowIdx" /* read index */, "outRowIdx" /* write index */,
          "htContainer" /* read container */, "outgoing" /* write container */,
          SETUP_INTERIOR_CONSTANT, OUTPUT_KEYS);

  private HashTableConfig htConfig;
  private final FragmentContext context;
  private final BufferAllocator allocator;
  private final RecordBatch incomingBuild;
  private final RecordBatch incomingProbe;
  private final RecordBatch outgoing;

  /**
   * Creates a factory bound to the given batches; {@code incomingProbe} and
   * {@code outgoing} may be null depending on the operator.
   */
  public ChainedHashTable(HashTableConfig htConfig, FragmentContext context, BufferAllocator allocator,
      RecordBatch incomingBuild, RecordBatch incomingProbe, RecordBatch outgoing) {
    this.htConfig = htConfig;
    this.context = context;
    this.allocator = allocator;
    this.incomingBuild = incomingBuild;
    this.incomingProbe = incomingProbe;
    this.outgoing = outgoing;
  }

  /**
   * Materializes the key expressions, generates the hash-table code
   * (isKeyMatch / setValue / getHash / outputRecordKeys) and returns a fully
   * set-up {@link HashTable} instance.
   *
   * @param outKeyFieldIds field ids that the key columns occupy in the outgoing batch
   * @throws SchemaChangeException if a key expression fails to materialize
   */
  public HashTable createAndSetupHashTable(TypedFieldId[] outKeyFieldIds)
      throws ClassTransformationException, IOException, SchemaChangeException {
    CodeGenerator<HashTable> top = CodeGenerator.get(HashTable.TEMPLATE_DEFINITION, context.getFunctionRegistry());
    ClassGenerator<HashTable> cg = top.getRoot();
    ClassGenerator<HashTable> cgInner = cg.getInnerGenerator("BatchHolder");

    LogicalExpression[] keyExprsBuild = new LogicalExpression[htConfig.getKeyExprsBuild().length];
    LogicalExpression[] keyExprsProbe = null;
    boolean isProbe = (htConfig.getKeyExprsProbe() != null);
    if (isProbe) {
      keyExprsProbe = new LogicalExpression[htConfig.getKeyExprsProbe().length];
    }

    ErrorCollector collector = new ErrorCollectorImpl();
    VectorContainer htContainerOrig = new VectorContainer(); // original ht container from which others may be cloned
    LogicalExpression[] htKeyExprs = new LogicalExpression[htConfig.getKeyExprsBuild().length]; // NOTE(review): never read below
    TypedFieldId[] htKeyFieldIds = new TypedFieldId[htConfig.getKeyExprsBuild().length];

    // Materialize the build-side key expressions and create one value vector per key.
    int i = 0;
    for (NamedExpression ne : htConfig.getKeyExprsBuild()) {
      final LogicalExpression expr =
          ExpressionTreeMaterializer.materialize(ne.getExpr(), incomingBuild, collector, context.getFunctionRegistry());
      if (collector.hasErrors()) {
        throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
      }
      if (expr == null) {
        continue;
      }
      keyExprsBuild[i] = expr;

      final MaterializedField outputField = MaterializedField.create(ne.getRef(), expr.getMajorType());
      // create a type-specific ValueVector for this key
      ValueVector vv = TypeHelper.getNewVector(outputField, allocator);
      vv.allocateNew();
      htKeyFieldIds[i] = htContainerOrig.add(vv);
      i++;
    }

    // Materialize the probe-side key expressions (joins only).
    if (isProbe) {
      i = 0;
      for (NamedExpression ne : htConfig.getKeyExprsProbe()) {
        final LogicalExpression expr =
            ExpressionTreeMaterializer.materialize(ne.getExpr(), incomingProbe, collector, context.getFunctionRegistry());
        if (collector.hasErrors()) {
          throw new SchemaChangeException("Failure while materializing expression. " + collector.toErrorString());
        }
        if (expr == null) {
          continue;
        }
        keyExprsProbe[i] = expr;
        i++;
      }
    }

    // generate code for isKeyMatch(), setValue(), getHash() and outputRecordKeys()
    setupIsKeyMatchInternal(cgInner, KeyMatchIncomingBuildMapping, KeyMatchHtableMapping, keyExprsBuild, htKeyFieldIds);
    setupIsKeyMatchInternal(cgInner, KeyMatchIncomingProbeMapping, KeyMatchHtableProbeMapping, keyExprsProbe, htKeyFieldIds);

    setupSetValue(cgInner, keyExprsBuild, htKeyFieldIds);
    if (outgoing != null) {
      if (outKeyFieldIds.length > htConfig.getKeyExprsBuild().length) {
        throw new IllegalArgumentException("Mismatched number of output key fields.");
      }
    }
    setupOutputRecordKeys(cgInner, htKeyFieldIds, outKeyFieldIds);

    /* Before generating the code for hashing the build and probe expressions
     * examine the expressions to make sure they are of the same type, add casts if necessary.
     * If they are not of the same type, hashing the same value of different types will yield different hash values.
     * NOTE: We add the cast only for the hash function, comparator function can handle the case
     * when expressions are different (for eg we have comparator functions that compare bigint and float8)
     * However for the hash to work correctly we would need to apply the cast.
     */
    addLeastRestrictiveCasts(keyExprsBuild, keyExprsProbe);

    setupGetHash(cg /* use top level code generator for getHash */, GetHashIncomingBuildMapping, keyExprsBuild, false);
    setupGetHash(cg /* use top level code generator for getHash */, GetHashIncomingProbeMapping, keyExprsProbe, true);

    HashTable ht = context.getImplementationClass(top);
    ht.setup(htConfig, context, allocator, incomingBuild, incomingProbe, outgoing, htContainerOrig);
    return ht;
  }

  /**
   * Emits the key-comparison code: returns FALSE on the first non-equal key
   * pair, TRUE when all keys compare equal (FALSE when there are no keys).
   */
  private void setupIsKeyMatchInternal(ClassGenerator<HashTable> cg, MappingSet incomingMapping,
      MappingSet htableMapping, LogicalExpression[] keyExprs, TypedFieldId[] htKeyFieldIds)
      throws SchemaChangeException {
    cg.setMappingSet(incomingMapping);

    if (keyExprs == null || keyExprs.length == 0) {
      cg.getEvalBlock()._return(JExpr.FALSE);
      return;
    }

    int i = 0;
    for (LogicalExpression expr : keyExprs) {
      cg.setMappingSet(incomingMapping);
      HoldingContainer left = cg.addExpr(expr, false);

      cg.setMappingSet(htableMapping);
      ValueVectorReadExpression vvrExpr = new ValueVectorReadExpression(htKeyFieldIds[i++]);
      HoldingContainer right = cg.addExpr(vvrExpr, false);

      // next we wrap the two comparison sides and add the expression block for the comparison.
      LogicalExpression f = FunctionGenerationHelper.getComparator(left, right, context.getFunctionRegistry());
      HoldingContainer out = cg.addExpr(f, false);

      // check if two values are not equal (comparator result != 0)
      JConditional jc = cg.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));
      jc._then()._return(JExpr.FALSE);
    }

    // All key expressions compared equal, so return TRUE
    cg.getEvalBlock()._return(JExpr.TRUE);
  }

  /**
   * Emits the code writing the key values into the hash-table container at
   * htRowIdx; returns FALSE when a write fails.
   */
  private void setupSetValue(ClassGenerator<HashTable> cg, LogicalExpression[] keyExprs,
      TypedFieldId[] htKeyFieldIds) throws SchemaChangeException {
    cg.setMappingSet(SetValueMapping);

    int i = 0;
    for (LogicalExpression expr : keyExprs) {
      ValueVectorWriteExpression vvwExpr = new ValueVectorWriteExpression(htKeyFieldIds[i++], expr, true);
      HoldingContainer hc = cg.addExpr(vvwExpr, false); // this will write to the htContainer at htRowIdx
      cg.getEvalBlock()._if(hc.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
    }
    cg.getEvalBlock()._return(JExpr.TRUE);
  }

  /**
   * Emits the code copying the key columns from the hash-table container to
   * the outgoing batch; FALSE when there is nothing to output or a copy fails.
   */
  private void setupOutputRecordKeys(ClassGenerator<HashTable> cg, TypedFieldId[] htKeyFieldIds,
      TypedFieldId[] outKeyFieldIds) {
    cg.setMappingSet(OutputRecordKeysMapping);

    if (outKeyFieldIds != null) {
      for (int i = 0; i < outKeyFieldIds.length; i++) {
        ValueVectorReadExpression vvrExpr = new ValueVectorReadExpression(htKeyFieldIds[i]);
        ValueVectorWriteExpression vvwExpr = new ValueVectorWriteExpression(outKeyFieldIds[i], vvrExpr, true);
        HoldingContainer hc = cg.addExpr(vvwExpr);
        cg.getEvalBlock()._if(hc.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
      }
      cg.getEvalBlock()._return(JExpr.TRUE);
    } else {
      cg.getEvalBlock()._return(JExpr.FALSE);
    }
  }

  /**
   * Adds a cast to the build or probe key expression when their minor types
   * differ, so both sides hash the same logical value identically.
   */
  private void addLeastRestrictiveCasts(LogicalExpression[] keyExprsBuild, LogicalExpression[] keyExprsProbe) {
    // If we don't have probe expressions then nothing to do get out
    if (keyExprsProbe == null) {
      return;
    }

    assert keyExprsBuild.length == keyExprsProbe.length;

    for (int i = 0; i < keyExprsBuild.length; i++) {
      MinorType buildType = keyExprsBuild[i].getMajorType().getMinorType();
      MinorType probeType = keyExprsProbe[i].getMajorType().getMinorType();

      if (buildType != probeType) {
        // We need to add a cast to one of the expressions
        List<MinorType> types = new LinkedList<>();
        types.add(buildType);
        types.add(probeType);
        MinorType result = TypeCastRules.getLeastRestrictiveType(types);

        // Add the cast
        List<LogicalExpression> args = new LinkedList<>();

        if (result == null) {
          throw new DrillRuntimeException(String.format(
              "Join conditions cannot be compared failing build expression: %s failing probe expression: %s",
              keyExprsBuild[i].getMajorType().toString(), keyExprsProbe[i].getMajorType().toString()));
        } else if (result != buildType) {
          // Add a cast expression on top of the build expression
          args.add(keyExprsBuild[i]);
          FunctionCall castCall = new FunctionCall("cast" + result.toString().toUpperCase(), args, ExpressionPosition.UNKNOWN);
          keyExprsBuild[i] = ExpressionTreeMaterializer.materialize(castCall, incomingBuild,
              new ErrorCollectorImpl(), context.getFunctionRegistry());
        } else if (result != probeType) {
          args.add(keyExprsProbe[i]);
          FunctionCall castCall = new FunctionCall("cast" + result.toString().toUpperCase(), args, ExpressionPosition.UNKNOWN);
          keyExprsProbe[i] = ExpressionTreeMaterializer.materialize(castCall, incomingProbe,
              new ErrorCollectorImpl(), context.getFunctionRegistry());
        }
      }
    }
  }

  /**
   * Emits the hash computation: hash of the first key, XOR-combined with the
   * hash of each subsequent key; returns 0 when there are no key expressions.
   */
  private void setupGetHash(ClassGenerator<HashTable> cg, MappingSet incomingMapping,
      LogicalExpression[] keyExprs, boolean isProbe) throws SchemaChangeException {

    cg.setMappingSet(incomingMapping);

    if (keyExprs == null || keyExprs.length == 0) {
      cg.getEvalBlock()._return(JExpr.lit(0));
      return;
    }

    HoldingContainer combinedHashValue = null;

    for (int i = 0; i < keyExprs.length; i++) {
      LogicalExpression expr = keyExprs[i];

      cg.setMappingSet(incomingMapping);
      HoldingContainer input = cg.addExpr(expr, false);

      // compute the hash(expr)
      LogicalExpression hashfunc = FunctionGenerationHelper.getFunctionExpression("hash",
          Types.required(MinorType.INT), context.getFunctionRegistry(), input);
      HoldingContainer hashValue = cg.addExpr(hashfunc, false);

      if (i == 0) {
        combinedHashValue = hashValue; // first expression..just use the hash value
      } else {
        // compute the combined hash value using XOR
        LogicalExpression xorfunc = FunctionGenerationHelper.getFunctionExpression("xor",
            Types.required(MinorType.INT), context.getFunctionRegistry(), hashValue, combinedHashValue);
        combinedHashValue = cg.addExpr(xorfunc, false);
      }
    }

    if (combinedHashValue != null) {
      cg.getEvalBlock()._return(combinedHashValue.getValue());
    } else {
      cg.getEvalBlock()._return(JExpr.lit(0));
    }
  }
}
import java.util.Scanner;

public class Solution {

    /**
     * Reads an int, a double and a line of text from stdin, then echoes them
     * back in reverse order, each on its own labelled line.
     */
    public static void main(String[] args) {
        final Scanner scanner = new Scanner(System.in);

        final int intValue = scanner.nextInt();
        final double doubleValue = scanner.nextDouble();
        // Consume the newline left behind by nextDouble() before reading the string.
        scanner.nextLine();
        final String stringValue = scanner.nextLine();

        System.out.println("String: " + stringValue);
        System.out.println("Double: " + doubleValue);
        System.out.println("Int: " + intValue);
    }
}
package seedu.address.logic.parser.person; import static seedu.address.commons.core.Messages.MESSAGE_INVALID_COMMAND_FORMAT; import seedu.address.commons.core.index.Index; import seedu.address.commons.exceptions.IllegalValueException; import seedu.address.logic.commands.person.SelectCommand; import seedu.address.logic.parser.Parser; import seedu.address.logic.parser.ParserUtil; import seedu.address.logic.parser.exceptions.ParseException; /** * Parses input arguments and creates a new SelectCommand object */ public class SelectCommandParser implements Parser<SelectCommand> { /** * Parses the given {@code String} of arguments in the context of the SelectCommand * and returns an SelectCommand object for execution. * @throws ParseException if the user input does not conform the expected format */ public SelectCommand parse(String args) throws ParseException { try { Index index = ParserUtil.parseIndex(args); return new SelectCommand(index); } catch (IllegalValueException ive) { throw new ParseException( String.format(MESSAGE_INVALID_COMMAND_FORMAT, SelectCommand.MESSAGE_USAGE)); } } }
package tr.com.bayramcicek.plakabul; import android.content.DialogInterface; import android.os.Bundle; import android.support.v7.app.AlertDialog; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.ListView; public class MainActivity extends AppCompatActivity { private String[] ulkeler = {"Adana", "Adıyaman", "Afyonkarahisar", "Ağrı", "Amasya", "Ankara", "Antalya", "Artvin", "Aydın" , "Balıkesir", "Bilecik", "Bingöl", "Bitlis", "Bolu", "Burdur", "Bursa", "Çanakkale", "Çankırı" , "Çorum", "Denizli", "Diyarbakır", "Edirne", "Elazığ", "Erzincan", "Erzurum", "Eskişehir", "Gaziantep" , "Giresun", "Gümüşhane", "Hakkâri", "Hatay", "Isparta", "İçel (Mersin)", "İstanbul", "İzmir", "Kars" , "Kastamonu", "Kayseri", "Kırklareli", "Kırşehir", "Kocaeli", "Konya", "Kütahya", "Malatya", "Manisa" , "Kahramanmaraş", "Mardin", "Muğla", "Muş", "Nevşehir", "Niğde", "Ordu", "Rize", "Sakarya" , "Samsun", "Siirt", "Sinop", "Sivas", "Tekirdağ", "Tokat", "Trabzon", "Tunceli", "Şanlıurfa" , "Uşak", "Van", "Yozgat", "Zonguldak", "Aksaray", "Bayburt", "Karaman", "Kırıkkale", "Batman" , "Şırnak", "Bartın", "Ardahan", "Iğdır", "Yalova", "Karabük", "Kilis", "Osmaniye", "Düzce"}; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); ListView listemiz = (ListView) findViewById(R.id.listView1); ArrayAdapter<String> veriAdaptoru = new ArrayAdapter<String>(this, android.R.layout.simple_list_item_1 ,android.R.id.text1, ulkeler ); listemiz.setAdapter(veriAdaptoru); listemiz.setOnItemClickListener(new AdapterView.OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent, View view, final int position, long id) { AlertDialog.Builder diyalogOluşturucu = new AlertDialog.Builder(MainActivity.this); diyalogOluşturucu.setMessage(ulkeler[position]) .setTitle((ulkeler[position])) 
.setMessage("Plaka No: " + (position + 1)) .setCancelable(false) .setPositiveButton("Geri Dön :)", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { dialog.dismiss(); } }); diyalogOluşturucu.create().show(); } }); } }
/** * Copyright (C) 2011-2020 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.flywaydb.test.sample.junit5; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.flywaydb.test.annotation.FlywayTest; import org.flywaydb.test.FlywayTestExecutionListener; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.extension.ExtendWith; import org.springframework.context.annotation.PropertySource; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.TestExecutionListeners; import org.springframework.test.context.junit.jupiter.SpringExtension; import org.springframework.test.context.support.DependencyInjectionTestExecutionListener; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; /** * Simple Test to show {@link FlywayTest} annotation together with {@link #org.junit.jupiter.api.BeforeAll} * annotation. 
*/ @ExtendWith(SpringExtension.class) @ContextConfiguration(locations = {"/context/simple_applicationContext.xml"}) @TestExecutionListeners({DependencyInjectionTestExecutionListener.class, FlywayTestExecutionListener.class}) @PropertySource("classpath:flyway.properties ") public class BeforeAllTest extends BaseDBHelper { private final Log logger = LogFactory.getLog(getClass()); private static int customerCount = 2; @BeforeAll @FlywayTest(locationsForMigrate = {"loadmsql"}) public static void beforeAll() { } @AfterEach public void afterEach(TestInfo testInfo) { customerCount++; logger.info(String.format("After %s test the customer count must be %d.", testInfo.getTestMethod(), customerCount)); } @Test public void simpleCountWithoutAny(TestInfo testName) throws Exception { int res = countCustomer(); assertThat("Customer count before add ", res, is(customerCount)); addCustomer("simpleCountWithoutAny"); res = countCustomer(); assertThat("Count of customer after add ", res, is(customerCount + 1)); } @Test public void additionalCountWithoutAny(TestInfo testName) throws Exception { int res = countCustomer(); assertThat("Customer count before add ", res, is(customerCount)); addCustomer("additionalCountWithoutAny"); res = countCustomer(); assertThat("Count of customer after add ", res, is(customerCount + 1)); } }
package org.nbone.message.mail.service.impl; import java.util.List; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.nbone.message.IUseCallback; import org.nbone.message.mail.vo.EmailVo; public class MailUseCallback implements IUseCallback<EmailVo,Object> { private static final Log logger = LogFactory.getLog(MailUseCallback.class); private static final String TIP = "邮件发送-"; @Override public boolean preHandle(EmailVo emailVo) { boolean result = false; if(emailVo == null ){ logger.error(new StringBuilder().append(TIP).append("bean emailVo must is not null.").append(EmailVo.class)); return result; } List<String> users = emailVo.getToAddressList(); String subject = emailVo.getSubject(); String content = emailVo.getContent(); int size; if(users == null || (size = users.size()) <= 0 ){ logger.error(new StringBuilder().append(TIP).append("邮件接收用户列表为空,请输入邮件接收用户.")); return result; } if(StringUtils.isEmpty(subject)){ logger.error(new StringBuilder().append(TIP).append("邮件主题为空,请输入邮件主题.")); return result; } if(StringUtils.isEmpty(content)){ logger.error(new StringBuilder().append(TIP).append("邮件内容为空,请输入邮件内容.")); return result; } return true; } @Override public EmailVo postHandle(EmailVo emailVo,Object result) { return emailVo; } }
package jahmm.jadetree.objectattributes;

import jahmm.jadetree.DecisionInode;
import jahmm.jadetree.DecisionRealNode;
import java.util.List;
import jutils.Name;
import jutlis.algebra.Function;
import jutlis.tuples.Holder;

/**
 * A named attribute of a source object, usable by the decision-tree machinery
 * both as a value extractor (via {@link Function}) and as a candidate split:
 * it can score itself on a sample set and materialize a decision node.
 *
 * @author kommusoft
 * @param <TSource> The type of the source.
 * @param <TTarget> The type of the attribute.
 */
public interface ObjectAttribute<TSource, TTarget> extends Name, Function<TSource, TTarget> {

    /**
     * Scores this attribute on the given sample list.
     * NOTE(review): presumably a split-quality measure (e.g. information gain)
     * used during tree induction, with {@code state} carrying implementation
     * state between calls — confirm against the implementations.
     */
    public abstract double calculateScore(Function<TSource, ? extends Object> function, Holder<Object> state, List<TSource> source);

    /**
     * Varargs convenience overload of the list-based
     * {@code calculateScore}; same contract.
     */
    public abstract double calculateScore(Function<TSource, ? extends Object> function, Holder<Object> state, TSource... source);

    /**
     * Builds the decision node that splits {@code source} on this attribute,
     * attached under {@code parent}.
     */
    public abstract DecisionRealNode<TSource> createDecisionNode(DecisionInode<TSource> parent, List<TSource> source, Function<TSource, ? extends Object> function, Holder<Object> state);
}
/* * Copyright 2017-present Open Networking Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.opencord.aaa.impl; import org.onlab.packet.DeserializationException; import org.onlab.packet.EthType; import org.onlab.packet.Ethernet; import org.onlab.packet.RADIUS; import org.onosproject.core.ApplicationId; import org.onosproject.net.flow.DefaultTrafficSelector; import org.onosproject.net.flow.TrafficSelector; import org.onosproject.net.packet.InboundPacket; import org.onosproject.net.packet.PacketContext; import org.onosproject.net.packet.PacketService; import org.opencord.aaa.AaaConfig; import org.opencord.aaa.RadiusCommunicator; import org.slf4j.Logger; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketException; import java.net.UnknownHostException; import java.util.concurrent.ExecutorService; import static java.util.concurrent.Executors.newSingleThreadExecutor; import static org.onlab.util.Tools.groupedThreads; import static org.onosproject.net.packet.PacketPriority.CONTROL; import static org.slf4j.LoggerFactory.getLogger; /** * Handles Socket based communication with the RADIUS server. 
 */
public class SocketBasedRadiusCommunicator implements RadiusCommunicator {

    // for verbose output
    private final Logger log = getLogger(getClass());

    // our unique identifier
    private ApplicationId appId;

    // to receive Packet-in events that we'll respond to
    PacketService packetService;

    // Socket used for UDP communications with RADIUS server
    private DatagramSocket radiusSocket;

    // Hostname of the RADIUS server; takes precedence over the IP when resolvable
    private String radiusHost;

    // Parsed RADIUS server addresses
    protected InetAddress radiusIpAddress;

    // RADIUS server UDP port number (the original comment said TCP; all traffic here is UDP)
    protected short radiusServerPort;

    // Executor for RADIUS communication thread
    private ExecutorService executor;

    // Worker thread for RADIUS communication
    private ExecutorService worker;

    // To track the received packets
    int packetNumber = 1;

    AaaManager aaaManager;

    SocketBasedRadiusCommunicator(ApplicationId appId, PacketService pktService,
                                  AaaManager aaaManager) {
        this.appId = appId;
        this.packetService = pktService;
        this.aaaManager = aaaManager;
    }

    /**
     * (Re)reads the RADIUS server address/port from config, opens the UDP socket
     * and starts the listener and worker threads.
     * NOTE(review): the local socket is bound to the *server's* port number —
     * presumably so replies addressed to that port are received; confirm.
     */
    @Override
    public void initializeLocalState(AaaConfig newCfg) {
        if (newCfg.radiusIp() != null) {
            radiusIpAddress = newCfg.radiusIp();
        }
        radiusServerPort = newCfg.radiusServerUdpPort();
        radiusHost = newCfg.radiusHostName();
        try {
            radiusSocket = new DatagramSocket(null);
            // SO_REUSEADDR so a restart can rebind without waiting for the old binding
            radiusSocket.setReuseAddress(true);
            radiusSocket.bind(new InetSocketAddress(radiusServerPort));
        } catch (Exception ex) {
            log.error("Can't open RADIUS socket", ex);
        }
        log.info("Remote RADIUS Server: {}:{}", radiusIpAddress, radiusServerPort);
        executor = newSingleThreadExecutor(groupedThreads("onos/aaa", "radius-%d", log));
        executor.execute(radiusListener);
        worker = newSingleThreadExecutor(groupedThreads("onos/aaa", "radius-packet-%d", log));
    }

    /**
     * Closes the socket (which unblocks the listener's receive()) and stops both threads.
     */
    @Override
    public void clearLocalState() {
        log.info("Closing RADIUS socket: {}:{}", radiusIpAddress, radiusServerPort);
        radiusSocket.close();
        executor.shutdownNow();
        worker.shutdownNow();
    }

    @Override
    public void deactivate() {
        clearLocalState();
    }

    /**
     * Asks the packet service to punt EAPOL frames to this application.
     */
    @Override
    public void requestIntercepts() {
        TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
        selector.matchEthType(EthType.EtherType.EAPOL.ethType().toShort());
        packetService.requestPackets(selector.build(), CONTROL, appId);
    }

    @Override
    public void withdrawIntercepts() {
        TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
        selector.matchEthType(EthType.EtherType.EAPOL.ethType().toShort());
        packetService.cancelPackets(selector.build(), CONTROL, appId);
    }

    /**
     * Serializes and sends a RADIUS packet over the UDP socket, preferring the
     * configured hostname (resolved per call) over the parsed IP address, and
     * records send success/failure in the operational-status service.
     */
    @Override
    public void sendRadiusPacket(RADIUS radiusPacket, InboundPacket inPkt) {
        try {
            final byte[] data = radiusPacket.serialize();
            final DatagramSocket socket = radiusSocket;
            try {
                InetAddress address;
                if (radiusHost != null) {
                    address = InetAddress.getByName(radiusHost);
                } else {
                    address = radiusIpAddress;
                }
                DatagramPacket packet =
                        new DatagramPacket(data, data.length, address, radiusServerPort);
                if (log.isTraceEnabled()) {
                    log.trace("Sending packet {} to Radius Server {}:{} using socket",
                            radiusPacket, address, radiusServerPort);
                }
                socket.send(packet);
                aaaManager.radiusOperationalStatusService.setStatusServerReqSent(true);
            } catch (UnknownHostException uhe) {
                log.warn("Unable to resolve host {}", radiusHost);
                aaaManager.radiusOperationalStatusService.setStatusServerReqSent(false);
            }
        } catch (IOException e) {
            log.error("Cannot send packet to RADIUS server", e);
        }
    }

    // in the socket base case we don't care about packets coming from the server as nothing meaningful will be
    // received from the southbound
    // NOTE(review): the trace fires for packets that are NOT LLDP/BSN, yet says
    // "Skipping" — the wording and the intended condition look inconsistent; confirm.
    @Override
    public void handlePacketFromServer(PacketContext context) {
        InboundPacket pkt = context.inPacket();
        Ethernet ethPkt = pkt.parsed();
        if (log.isTraceEnabled() && ethPkt.getEtherType() != Ethernet.TYPE_LLDP
                && ethPkt.getEtherType() != Ethernet.TYPE_BSN) {
            log.trace("Skipping Ethernet packet type {}",
                    EthType.EtherType.lookup(ethPkt.getEtherType()));
        }
    }

    // Handle radius packet for further processing: deserialize the datagram,
    // record round-trip stats and hand the packet to the AAA manager; malformed
    // responses are counted, not rethrown. Runs on the single "worker" thread.
    private void handleRadiusPacketInternal(DatagramPacket inboundBasePacket) {
        RADIUS inboundRadiusPacket;
        aaaManager.checkForPacketFromUnknownServer(inboundBasePacket.getAddress().getHostAddress());
        log.debug("Packet #{} received", packetNumber++);
        try {
            inboundRadiusPacket =
                    RADIUS.deserializer().deserialize(inboundBasePacket.getData(), 0, inboundBasePacket.getLength());
            if (log.isTraceEnabled()) {
                log.trace("Handling inboundRadiusPacket {} with identifier {}",
                        inboundRadiusPacket, inboundRadiusPacket.getIdentifier() & 0xff);
            }
            aaaManager.aaaStatisticsManager.handleRoundtripTime(inboundRadiusPacket.getIdentifier());
            aaaManager.handleRadiusPacket(inboundRadiusPacket);
        } catch (DeserializationException dex) {
            aaaManager.aaaStatisticsManager.getAaaStats().increaseMalformedResponsesRx();
            log.warn("Cannot deserialize packet", dex);
        }
    }

    /**
     * Blocking receive loop: reads datagrams off the RADIUS socket and hands each
     * one to the worker executor. An IOException (e.g. the socket being closed by
     * clearLocalState) terminates the loop; other exceptions are logged and the
     * loop continues.
     */
    class RadiusListener implements Runnable {

        @Override
        public void run() {
            boolean done = false;

            try {
                log.info("UDP listener thread starting up, socket buffer size {}",
                        radiusSocket.getReceiveBufferSize());
            } catch (SocketException e) {
                log.error("Socket exception", e);
            }

            while (!done) {
                try {
                    byte[] packetBuffer = new byte[RADIUS.RADIUS_MAX_LENGTH];
                    DatagramPacket inboundBasePacket =
                            new DatagramPacket(packetBuffer, packetBuffer.length);
                    DatagramSocket socket = radiusSocket;
                    socket.receive(inboundBasePacket);
                    worker.execute(() -> handleRadiusPacketInternal(inboundBasePacket));
                } catch (IOException e) {
                    log.warn("Socket was closed, exiting listener thread");
                    done = true;
                } catch (Exception e) {
                    log.error("RadiusListener thread thrown an exception: {}", e.getMessage(), e);
                }
            }
            log.info("UDP listener thread shutting down");
        }
    }

    RadiusListener radiusListener = new RadiusListener();
}
/*- * << * task * == * Copyright (C) 2019 - 2020 sia * == * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * >> */ package com.sia.task.integration.curator; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.sia.task.core.util.StringHelper; import com.sia.task.integration.curator.hanler.NodeCacheHandler; import com.sia.task.integration.curator.hanler.PathCacheHandler; import com.sia.task.integration.curator.hanler.TreeCacheHandler; import com.sia.task.integration.curator.properties.ZookeeperConstant; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.recipes.cache.*; import org.apache.curator.framework.state.ConnectionState; import org.apache.curator.framework.state.ConnectionStateListener; import org.apache.curator.retry.RetryNTimes; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.*; /** * 封装的Curator客户端基本操作 * * @author pengfeili23 * @date 2018年6月27日 下午6:49:40 * <p> * 1. 增加ZK管理端API操作 * 2. 
 *    restrict log verbosity
 */
public class CuratorClient {

    private static final Logger LOGGER = LoggerFactory.getLogger(CuratorClient.class);

    /**
     * CuratorFramework instance
     */
    private CuratorFramework client;

    /**
     * store PathChildrenCache, NodeCache and TreeCache, keyed by watched path
     * (one cache of each type per path at most)
     */
    private final Map<String, PathChildrenCache> pathCacheMap = new ConcurrentHashMap<String, PathChildrenCache>();
    private final Map<String, NodeCache> nodeCacheMap = new ConcurrentHashMap<String, NodeCache>();
    private final Map<String, TreeCache> treeCacheMap = new ConcurrentHashMap<String, TreeCache>();

    /**
     * store the ExecutorService backing each cache's listener, keyed by path
     */
    private final Map<String, ExecutorService> pathCacheExecutor = new ConcurrentHashMap<String, ExecutorService>();
    private final Map<String, ExecutorService> nodeCacheExecutor = new ConcurrentHashMap<String, ExecutorService>();
    private final Map<String, ExecutorService> treeCacheExecutor = new ConcurrentHashMap<String, ExecutorService>();

    /**
     * Initializes the ZooKeeper connection with an NTimes retry policy and
     * registers a shutdown hook that releases all resources on JVM exit.
     *
     * @param zkAddress ZooKeeper connection string
     */
    public CuratorClient(String zkAddress, int retryTimes, int sleepMsBetweenRetries) {
        // establish the ZK connection
        client = CuratorFrameworkFactory.newClient(zkAddress, new RetryNTimes(retryTimes, sleepMsBetweenRetries));
        try {
            client.start();
            // connection-state listener, mainly to observe disconnect/reconnect events;
            // CuratorFramework reconnects automatically, so the event is only logged here
            ConnectionStateListener listener = new ConnectionStateListener() {
                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                    LOGGER.info("Zookeeper ConnectionState:" + newState.name());
                }
            };
            ThreadFactory namedThreadFactory =
                    new ThreadFactoryBuilder().setNameFormat("Zookeeper-ConnectionState-%d").build();
            ExecutorService pool = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
            client.getConnectionStateListenable().addListener(listener, pool);
            LOGGER.info("success connect to Zookeeper: " + zkAddress);
            // release resources proactively when the application shuts down
            shutdownHook();
        } catch (Exception ex) {
            LOGGER.error("", ex);
            if (client != null) {
                closeCient();
            }
        }
    }

    /**
     * get CuratorFramework for some use (e.g. lock)
     *
     * @return the underlying CuratorFramework
     */
    public CuratorFramework getCuratorFramework() {
        return client;
    }

    /**
     * add create authorization, can only create children in the given path.
     * NOTE(review): body is identical to {@link #addAllAuth}; the create-only vs
     * all-permissions distinction is presumably enforced by the ACL attached at
     * node creation time — confirm.
     */
    public void addCreateAuth(String scheme, String auth) {
        try {
            client.getZookeeperClient().getZooKeeper().addAuthInfo(scheme, auth.getBytes());
            LOGGER.info("addCreateAuth success");
        } catch (Exception e) {
            LOGGER.info("addCreateAuth fail: ", e);
        }
    }

    /**
     * all permissions
     */
    public void addAllAuth(String scheme, String auth) {
        try {
            client.getZookeeperClient().getZooKeeper().addAuthInfo(scheme, auth.getBytes());
            LOGGER.info("addAllAuth success");
        } catch (Exception e) {
            LOGGER.info("addAllAuth fail: ", e);
        }
    }

    /**
     * createPersistentZKNode, creatingParentsIfNeeded for given path, CreateMode.PERSISTENT.
     * Returns false (without attempting creation) when the path is empty, already
     * exists, or the data is null.
     *
     * @param path node path
     * @param data initial node content
     * @return true when the node was created
     */
    public boolean createPersistentZKNode(String path, String data) {
        if (StringHelper.isEmpty(path) || isExists(path) || data == null) {
            return false;
        }
        try {
            String zkPath = client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(path, data.getBytes());
            LOGGER.info("createPersistentZKNode,创建节点成功,节点地址:" + zkPath);
            return true;
        } catch (Exception e) {
            LOGGER.error("createPersistentZKNode,创建节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * createPersistentZKNode, set default value
     *
     * @param path node path
     * @return true when the node was created
     */
    public boolean createPersistentZKNode(String path) {
        return createPersistentZKNode(path, ZookeeperConstant.ZK_DEFAULT_VALUE);
    }

    /**
     * createEphemeralZKNode, creatingParentsIfNeeded for given path, leaf node is CreateMode.EPHEMERAL
     *
     * @param path node path
     * @param data initial node content
     * @return true when the node was created
     */
    public boolean createEphemeralZKNode(String path, String data) {
        if (StringHelper.isEmpty(path) || isExists(path) || data == null) {
            return false;
        }
        try {
            String zkPath = client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, data.getBytes());
            LOGGER.info("createEphemeralZKNode,创建节点成功,节点地址:" + zkPath);
            return true;
        } catch (Exception e) {
            LOGGER.error("createEphemeralZKNode,创建节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * createEphemeralZKNode, set default value
     *
     * @param path node path
     * @return true when the node was created
     */
    public boolean createEphemeralZKNode(String path) {
        return createEphemeralZKNode(path, ZookeeperConstant.ZK_DEFAULT_VALUE);
    }

    /**
     * createFixedPersistentZKNode, creating for given path (will NOT create parent path), CreateMode.PERSISTENT
     *
     * @param path node path
     * @param data initial node content
     * @return true when the node was created
     */
    public boolean createFixedPersistentZKNode(String path, String data) {
        if (StringHelper.isEmpty(path) || isExists(path) || data == null) {
            return false;
        }
        try {
            String zkPath = client.create().withMode(CreateMode.PERSISTENT).forPath(path, data.getBytes());
            LOGGER.info("createFixedPersistentZKNode,创建节点成功,节点地址:" + zkPath);
            return true;
        } catch (Exception e) {
            LOGGER.error("createFixedPersistentZKNode,创建节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * createFixedPersistentZKNode, set default value
     *
     * @param path node path
     * @return true when the node was created
     */
    public boolean createFixedPersistentZKNode(String path) {
        return createFixedPersistentZKNode(path, ZookeeperConstant.ZK_DEFAULT_VALUE);
    }

    /**
     * createFixedEphemeralZKNode, creating for given path (will NOT create parent path), leaf node is
     * CreateMode.EPHEMERAL
     *
     * @param path node path
     * @param data initial node content
     * @return true when the node was created
     */
    public boolean createFixedEphemeralZKNode(String path, String data) {
        if (StringHelper.isEmpty(path) || isExists(path) || data == null) {
            return false;
        }
        try {
            String zkPath = client.create().withMode(CreateMode.EPHEMERAL).forPath(path, data.getBytes());
            LOGGER.info("createFixedEphemeralZKNode,创建节点成功,节点地址:" + zkPath);
            return true;
        } catch (Exception e) {
            LOGGER.error("createFixedEphemeralZKNode,创建节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * createFixedEphemeralZKNode, set default value
     *
     * @param path node path
     * @return true when the node was created
     */
    public boolean createFixedEphemeralZKNode(String path) {
        return createFixedEphemeralZKNode(path, ZookeeperConstant.ZK_DEFAULT_VALUE);
    }

    /**
     * Sets node data; no-op (returns false) when the node does not exist or data is null.
     *
     * @param path node path
     * @param data new content
     * @return true on success
     */
    public boolean setData(String path, String data) {
        if (!isExists(path) || data == null) {
            return false;
        }
        try {
            Stat stat = client.setData().forPath(path, data.getBytes());
            LOGGER.info("setData,更新数据成功, path:" + path + ", stat: " + stat);
            return true;
        } catch (Exception e) {
            LOGGER.error("setData,更新节点数据失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * may return null if path not exists; node data is decoded as UTF-8
     *
     * @param path node path
     * @return node content, empty string for a null payload, or null when absent/on error
     */
    public String getData(String path) {
        String response = null;
        if (!isExists(path)) {
            return response;
        }
        try {
            byte[] datas = client.getData().forPath(path);
            response = datas == null ? "" : new String(datas, "utf-8");
            // NOTE(review): guarded by isDebugEnabled() but logs at INFO level — looks
            // unintentional; confirm whether DEBUG was meant.
            if (LOGGER.isDebugEnabled()) {
                LOGGER.info("读取数据成功, path:" + path + ", content:" + response);
            }
        } catch (Exception e) {
            LOGGER.error("getData,读取数据失败! path: " + path + ", errMsg:" + e.getMessage(), e);
        }
        return response;
    }

    /**
     * may return null if path not exists
     *
     * @param path node path
     * @return child node names, or null when absent/on error
     */
    public List<String> getChildren(String path) {
        List<String> list = null;
        if (!isExists(path)) {
            return list;
        }
        try {
            list = client.getChildren().forPath(path);
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("getChildren,读取数据成功, path:" + path);
            }
        } catch (Exception e) {
            LOGGER.error("getChildren,读取数据失败! path: " + path + ", errMsg:" + e.getMessage(), e);
        }
        return list;
    }

    /**
     * Checks whether the given path exists; false for empty paths and on errors.
     *
     * @param path node path
     * @return true when the node exists
     */
    public boolean isExists(String path) {
        if (StringHelper.isEmpty(path)) {
            return false;
        }
        try {
            Stat stat = client.checkExists().forPath(path);
            return null != stat;
        } catch (Exception e) {
            LOGGER.error("isExists 读取数据失败! path: " + path + ", errMsg:" + e.getMessage(), e);
        }
        return false;
    }

    /**
     * for given path (node): isPersistent or (EPHEMERAL)
     *
     * @param path node path
     * @return true when the node exists and is persistent
     */
    public boolean isPersistent(String path) {
        if (StringHelper.isEmpty(path)) {
            return false;
        }
        try {
            Stat stat = client.checkExists().forPath(path);
            if (stat == null) {
                return false;
            }
            // If it is not an ephemeral node, it will be zero.
            return stat.getEphemeralOwner() == 0L;
        } catch (Exception e) {
            LOGGER.error("isPersistent 读取数据失败! path: " + path + ", errMsg:" + e.getMessage(), e);
        }
        return false;
    }

    /**
     * only delete leaf node for given path (fails if the node has children)
     *
     * @param path node path
     * @return true when deleted
     */
    public boolean deleteLeafZKNode(String path) {
        if (!isExists(path)) {
            return false;
        }
        try {
            client.delete().forPath(path);
            LOGGER.info("deleteLeafZKNode,删除节点成功,节点地址:" + path);
            return true;
        } catch (Exception e) {
            LOGGER.error("deleteLeafZKNode,删除节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * deletingChildrenIfNeeded for given path (recursive delete)
     *
     * @param path node path
     * @return true when deleted
     */
    public boolean deletePathZKNode(String path) {
        if (!isExists(path)) {
            return false;
        }
        try {
            client.delete().deletingChildrenIfNeeded().forPath(path);
            LOGGER.info("deletePathZKNode,删除节点成功,节点地址:" + path);
            return true;
        } catch (Exception e) {
            LOGGER.error("deletePathZKNode,删除节点失败:" + e.getMessage() + ",path:" + path, e);
        }
        return false;
    }

    /**
     * Creates a PathChildrenCache watching the direct children of the given path:
     * add/update/delete of children are reported, operations on the node itself
     * are not. If the path is deleted and re-created, the watcher is no longer
     * effective. Only one cache per path is kept: a concurrent duplicate is
     * closed and the already-registered instance is returned.
     *
     * @param path    path to watch
     * @param handler callback invoked for each child event
     * @return the registered (possibly pre-existing) cache
     * @throws Exception on Curator errors
     */
    public PathChildrenCache createPathCache(String path, PathCacheHandler handler) throws Exception {
        // create the PathChildrenCache listener
        PathChildrenCache childrenCache = new PathChildrenCache(this.client, path, true);
        PathChildrenCacheListener childrenCacheListener = new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                handler.process(event);
            }
        };
        ThreadFactory namedThreadFactory =
                new ThreadFactoryBuilder().setNameFormat("Zookeeper-PathChildrenCacheListener-%d").build();
        ExecutorService pool = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
        childrenCache.getListenable().addListener(childrenCacheListener, pool);
        // manage PathChildrenCache instances centrally: only one watcher of this type
        // per path may exist (creating several would leave only one effective)
        PathChildrenCache current = pathCacheMap.putIfAbsent(path, childrenCache);
        // won the race: record the resources and start the cache
        if (current == null) {
            pathCacheExecutor.putIfAbsent(path, pool);
            childrenCache.start();
            LOGGER.info("Register zookeeper path: [" + path + "]'s PathChildrenCache successfully!");
            return childrenCache;
        }
        // the resource already exists; dispose of the redundant one just built
        LOGGER.info("zookeeper path: [" + path + "]'s PathChildrenCache already exists!");
        childrenCache.close();
        pool.shutdown();
        return current;
    }

    /**
     * Creates a NodeCache watching a single node: create/update/delete of the
     * given path. The watcher stays effective even if the path is deleted and
     * re-created. Only one cache per path is kept (see createPathCache).
     *
     * @param path    path to watch
     * @param handler callback invoked on node changes
     * @return the registered (possibly pre-existing) cache
     * @throws Exception on Curator errors
     */
    public NodeCache createNodeCache(String path, NodeCacheHandler handler) throws Exception {
        // create the NodeCache listener
        NodeCache nodeCache = new NodeCache(this.client, path, false);
        NodeCacheListener nodeListener = new NodeCacheListener() {
            @Override
            public void nodeChanged() throws Exception {
                handler.process(nodeCache);
            }
        };
        ThreadFactory namedThreadFactory =
                new ThreadFactoryBuilder().setNameFormat("Zookeeper-NodeCacheListener-%d").build();
        ExecutorService pool = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
        nodeCache.getListenable().addListener(nodeListener, pool);
        // manage NodeCache instances centrally: only one watcher of this type per path
        NodeCache current = nodeCacheMap.putIfAbsent(path, nodeCache);
        // won the race: record the resources and start the cache
        if (current == null) {
            nodeCacheExecutor.putIfAbsent(path, pool);
            nodeCache.start();
            LOGGER.info("Register zookeeper path: [" + path + "]'s NodeCache successfully!");
            return nodeCache;
        }
        // the resource already exists; dispose of the redundant one just built
        LOGGER.info("zookeeper path: [" + path + "]'s NodeCache already exists!");
        nodeCache.close();
        pool.shutdown();
        return current;
    }

    /**
     * Combines the characteristics of NodeCache and PathChildrenCache: watches an
     * entire subtree (watch depth configurable on TreeCache). The watcher stays
     * effective even if the path is deleted and re-created. Only one cache per
     * path is kept (see createPathCache).
     *
     * @param path    root of the subtree to watch
     * @param handler callback invoked for each tree event
     * @return the registered (possibly pre-existing) cache
     * @throws Exception on Curator errors
     */
    public TreeCache createTreeCache(String path, TreeCacheHandler handler) throws Exception {
        // create the TreeCache listener
        TreeCache treeCache = new TreeCache(this.client, path);
        TreeCacheListener treeCacheListener = new TreeCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, TreeCacheEvent event) throws Exception {
                handler.process(event);
            }
        };
        ThreadFactory namedThreadFactory =
                new ThreadFactoryBuilder().setNameFormat("Zookeeper-TreeCacheListener-%d").build();
        ExecutorService pool = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
        treeCache.getListenable().addListener(treeCacheListener, pool);
        // manage TreeCache instances centrally: only one watcher of this type per path
        TreeCache current = treeCacheMap.putIfAbsent(path, treeCache);
        // won the race: record the resources and start the cache
        if (current == null) {
            treeCacheExecutor.putIfAbsent(path, pool);
            treeCache.start();
            LOGGER.info("Register zookeeper path: [" + path + "]'s TreeCache successfully!");
            return treeCache;
        }
        // the resource already exists; dispose of the redundant one just built
        LOGGER.info("zookeeper path: [" + path + "]'s TreeCache already exists!");
        treeCache.close();
        pool.shutdown();
        return current;
    }

    /**
     * Removes and closes the PathChildrenCache registered for the path (if any),
     * along with its listener executor.
     *
     * @param path watched path
     * @throws Exception on close errors
     */
    public void closePathCache(String path) throws Exception {
        if (StringHelper.isEmpty(path)) {
            return;
        }
        PathChildrenCache pathCache = pathCacheMap.get(path);
        if (pathCache != null) {
            pathCacheMap.remove(path);
            pathCache.close();
            LOGGER.info("close PathChildrenCache:" + path);
        }
        ExecutorService executor = pathCacheExecutor.get(path);
        if (executor != null) {
            pathCacheExecutor.remove(path);
            executor.shutdown();
            LOGGER.info("close ExecutorService for PathChildrenCache:" + path);
        }
    }

    /**
     * Removes and closes the NodeCache registered for the path (if any),
     * along with its listener executor.
     *
     * @param path watched path
     * @throws Exception on close errors
     */
    public void closeNodeCache(String path) throws Exception {
        if (StringHelper.isEmpty(path)) {
            return;
        }
        NodeCache nodeCache = nodeCacheMap.get(path);
        if (nodeCache != null) {
            nodeCacheMap.remove(path);
            nodeCache.close();
            LOGGER.info("close NodeCache:" + path);
        }
        ExecutorService executor = nodeCacheExecutor.get(path);
        if (executor != null) {
            nodeCacheExecutor.remove(path);
            executor.shutdown();
            LOGGER.info("close ExecutorService for NodeCache:" + path);
        }
    }

    /**
     * Removes and closes the TreeCache registered for the path (if any),
     * along with its listener executor.
     *
     * @param path watched path
     * @throws Exception on close errors
     */
    public void closeTreeCache(String path) throws Exception {
        if (StringHelper.isEmpty(path)) {
            return;
        }
        TreeCache treeCache = treeCacheMap.get(path);
        if (treeCache != null) {
            treeCacheMap.remove(path);
            treeCache.close();
            LOGGER.info("close TreeCache:" + path);
        }
        ExecutorService executor = treeCacheExecutor.get(path);
        if (executor != null) {
            treeCacheExecutor.remove(path);
            executor.shutdown();
            LOGGER.info("close ExecutorService for TreeCache:" + path);
        }
    }

    /**
     * Closes the PathChildrenCache of every registered path.
     *
     * @throws Exception on close errors
     */
    public void closeAllPathCache() throws Exception {
        Set<String> paths = pathCacheMap.keySet();
        for (String path : paths) {
            closePathCache(path);
        }
    }

    /**
     * Closes the NodeCache of every registered path.
     *
     * @throws Exception on close errors
     */
    public void closeAllNodeCache() throws Exception {
        Set<String> paths = nodeCacheMap.keySet();
        for (String path : paths) {
            closeNodeCache(path);
        }
    }

    /**
     * Closes the TreeCache of every registered path.
     *
     * @throws Exception on close errors
     */
    public void closeAllTreeCache() throws Exception {
        Set<String> paths = treeCacheMap.keySet();
        for (String path : paths) {
            closeTreeCache(path);
        }
    }

    /**
     * Actively closes the ZK connection, releasing every cache and executor first.
     *
     * @throws Exception on close errors
     */
    public void close() throws Exception {
        closeAllPathCache();
        closeAllNodeCache();
        closeAllTreeCache();
        closeCient();
    }

    /**
     * closeCient — closes the CuratorFramework client, swallowing (but logging) errors.
     */
    public void closeCient() {
        try {
            client.close();
        } catch (Exception ex) {
            LOGGER.error("", ex);
        }
    }

    /**
     * Registers a JVM shutdown hook that disconnects from ZK on normal application
     * exit, so ephemeral nodes expire quickly.
     */
    private void shutdownHook() {
        LOGGER.info("addShutdownHook for CuratorClient");
        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    LOGGER.info("shutdownHook begin");
                    close();
                    LOGGER.info("shutdownHook end");
                } catch (Exception e) {
                    LOGGER.error("", e);
                }
            }
        }));
    }

    // additional ZK admin-API operations

    /**
     * Returns the node's Stat (null when the node does not exist).
     *
     * @param path node path
     * @return the Stat, or null
     * @throws Exception on Curator errors
     */
    public Stat getStat(String path) throws Exception {
        Stat stat = client.checkExists().forPath(path);
        return stat;
    }

    /**
     * Returns the node's first ACL entry as a map with keys "perms", "id" and "scheme".
     *
     * @param path node path
     * @return ACL details of the first entry
     * @throws Exception on Curator errors
     */
    public Map<String, Object> getACL(String path) throws Exception {
        ACL acl = client.getACL().forPath(path).get(0);
        Id id = acl.getId();
        HashMap<String, Object> map = new HashMap<>();
        map.put("perms", acl.getPerms());
        map.put("id", id.getId());
        map.put("scheme", id.getScheme());
        return map;
    }

    /**
     * Returns the node's version number, or -1 when the node does not exist.
     *
     * @param path node path
     * @return version, or -1
     * @throws Exception on Curator errors
     */
    public int getVersion(String path) throws Exception {
        Stat stat = this.getStat(path);
        if (stat != null) {
            return stat.getVersion();
        } else {
            return -1;
        }
    }

    /**
     * Creates a node (parents included) with the given payload.
     *
     * @param path    node path
     * @param payload initial data content
     * @throws Exception on Curator errors
     */
    public void createNode(String path, byte[] payload) throws Exception {
        client.create().creatingParentsIfNeeded().forPath(path, payload);
        LOGGER.info("节点创建成功, Path: " + path);
    }

    /**
     * createNodeWithACL — creates a node under ACL mode (ALL perms for the
     * authenticated ids). Failures are logged and swallowed.
     *
     * @param path    node path
     * @param payload initial data content
     * @throws Exception on Curator errors
     */
    public void createNodeWithACL(String path, byte[] payload) throws Exception {
        ACL acl = new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS);
        List<ACL> aclList = Lists.newArrayList(acl);
        try {
            client.create().withACL(aclList).forPath(path, payload);
        } catch (Exception e) {
            LOGGER.error("Create security file failed.");
            e.printStackTrace();
        }
    }

    /**
     * Deletes the given node (non-recursive).
     *
     * @param path node path
     * @throws Exception on Curator errors
     */
    public void deleteNode(String path) throws Exception {
        client.delete().forPath(path);
        LOGGER.info("节点删除成功, Path: " + path);
    }

    /**
     * Updates the given node's data content.
     *
     * @param path    node path
     * @param payload data content
     * @return true when the update succeeded
     * @throws Exception on Curator errors
     */
    public boolean setData(String path, byte[] payload) throws Exception {
        Stat stat = client.setData().forPath(path, payload);
        if (stat != null) {
            return true;
        } else {
            LOGGER.error("设置数据失败,path:" + path);
            return false;
        }
    }

    /**
     * CAS update of the node's data content: when {@code version != -1} the write
     * only succeeds against that exact version, otherwise it is unconditional.
     *
     * @param path    node path
     * @param payload data content
     * @param version expected version, or -1 for unconditional
     * @return the new version on success, -1 on failure or version conflict
     * @throws Exception on Curator errors other than a version conflict
     */
    public int setDataWithVersion(String path, byte[] payload, int version) throws Exception {
        try {
            Stat stat = null;
            if (version != -1) {
                stat = client.setData().withVersion(version).forPath(path, payload);
            } else {
                stat = client.setData().forPath(path, payload);
            }
            if (stat != null) {
                return stat.getVersion();
            } else {
                LOGGER.error("CAS设置数据失败,path : {}", path);
                return -1;
            }
        } catch (KeeperException.BadVersionException ex) {
            LOGGER.error("CAS设置数据失败,path : {},error msg : {}", path, ex.getMessage());
            return -1;
        }
    }
}
package in.hocg.payment.spring.boot.sample;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

/**
 * Spring Boot entry point for the payment sample module.
 * <p>
 * Created by hocgin on 2019/12/14.
 * email: hocgin@gmail.com
 *
 * @author hocgin
 */
@SpringBootApplication
public class PaymentApplication {

    /**
     * Bootstraps the Spring application context.
     *
     * @param args command-line arguments, forwarded to Spring Boot
     */
    public static void main(String[] args) {
        SpringApplication.run(PaymentApplication.class, args);
    }
}
/*******************************************************************************
 * Copyright (c) 2008 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 *******************************************************************************/
package api;

// NOTE(review): this is an API-tools test fixture. The Javadoc restriction tags
// below (@noinstantiate, @nooverride) are scanned by the test harness —
// presumably their exact placement matters, so they are left untouched.
/**
 * Test annotation for Java 5 performance testing
 *
 * @since 1.0.0
 * @noinstantiate
 */
public @interface TestAnnot {

	// Annotation-type constant (implicitly public static final, interface-style);
	// a non-constant initializer like null is legal on interface fields.
	/**
	 * 
	 */
	public String name = null;

	// Annotation member with a default value of -1.
	/**
	 * @return value
	 * @nooverride
	 */
	public int m1() default -1;
}
/*
 * Copyright 2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gradle.api.plugins.scala;

import org.gradle.api.Incubating;
import org.gradle.api.provider.Property;

/**
 * Common configuration for Scala based projects. This is added by the {@link ScalaBasePlugin}.
 *
 * @since 6.0
 */
@Incubating
public interface ScalaPluginExtension {
    /**
     * The version of the Zinc compiler to use for compiling Scala code.
     * <p>
     * Default version is Zinc {@value ScalaBasePlugin#DEFAULT_ZINC_VERSION}.
     * </p>
     * <p>
     * Gradle supports Zinc 1.2.0 to 1.3.5.
     * </p>
     *
     * @return the zinc compiler version property
     * @since 6.0
     */
    Property<String> getZincVersion();
}
package com.yammer.tenacity.core.config;

import com.google.common.base.Optional;
import com.netflix.hystrix.HystrixCommandProperties;

import javax.validation.Valid;
import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import java.util.Objects;

/**
 * Aggregate configuration for a tenacity-protected dependency: thread pool,
 * circuit breaker and semaphore settings, an execution timeout, and an optional
 * Hystrix isolation strategy override.
 * <p>
 * Bound by Jackson through the getter/setter pairs below; validated via the
 * javax.validation annotations on the fields.
 */
public class TenacityConfiguration {
    /** Thread-pool settings; never null, recursively validated. */
    @NotNull
    @Valid
    private ThreadPoolConfiguration threadpool = new ThreadPoolConfiguration();

    /** Circuit-breaker settings; never null, recursively validated. */
    @NotNull
    @Valid
    private CircuitBreakerConfiguration circuitBreaker = new CircuitBreakerConfiguration();

    /** Semaphore settings; never null, recursively validated. */
    @NotNull
    @Valid
    private SemaphoreConfiguration semaphore = new SemaphoreConfiguration();

    /** Command execution timeout in milliseconds; must be non-negative. Default 1000ms. */
    @Min(value = 0)
    @Max(Integer.MAX_VALUE)
    private int executionIsolationThreadTimeoutInMillis = 1000;

    /** Optional isolation-strategy override; absent means "use Hystrix's default". */
    private Optional<HystrixCommandProperties.ExecutionIsolationStrategy> executionIsolationStrategy = Optional.absent();

    public TenacityConfiguration() { /* Jackson */ }

    /**
     * Creates a configuration with no isolation-strategy override.
     */
    public TenacityConfiguration(ThreadPoolConfiguration threadpool,
                                 CircuitBreakerConfiguration circuitBreaker,
                                 SemaphoreConfiguration semaphore,
                                 int executionIsolationThreadTimeoutInMillis) {
        this(threadpool, circuitBreaker, semaphore, executionIsolationThreadTimeoutInMillis, null);
    }

    /**
     * Creates a fully specified configuration.
     *
     * @param executionIsolationStrategy may be {@code null}, meaning no override
     */
    public TenacityConfiguration(ThreadPoolConfiguration threadpool,
                                 CircuitBreakerConfiguration circuitBreaker,
                                 SemaphoreConfiguration semaphore,
                                 int executionIsolationThreadTimeoutInMillis,
                                 HystrixCommandProperties.ExecutionIsolationStrategy executionIsolationStrategy) {
        this.threadpool = threadpool;
        this.circuitBreaker = circuitBreaker;
        this.semaphore = semaphore;
        this.executionIsolationThreadTimeoutInMillis = executionIsolationThreadTimeoutInMillis;
        this.executionIsolationStrategy = Optional.fromNullable(executionIsolationStrategy);
    }

    public ThreadPoolConfiguration getThreadpool() {
        return threadpool;
    }

    public CircuitBreakerConfiguration getCircuitBreaker() {
        return circuitBreaker;
    }

    public int getExecutionIsolationThreadTimeoutInMillis() {
        return executionIsolationThreadTimeoutInMillis;
    }

    public void setThreadpool(ThreadPoolConfiguration threadpool) {
        this.threadpool = threadpool;
    }

    public void setCircuitBreaker(CircuitBreakerConfiguration circuitBreaker) {
        this.circuitBreaker = circuitBreaker;
    }

    public void setExecutionIsolationThreadTimeoutInMillis(int executionIsolationThreadTimeoutInMillis) {
        this.executionIsolationThreadTimeoutInMillis = executionIsolationThreadTimeoutInMillis;
    }

    public SemaphoreConfiguration getSemaphore() {
        return semaphore;
    }

    public void setSemaphore(SemaphoreConfiguration semaphore) {
        this.semaphore = semaphore;
    }

    /** @return whether an isolation-strategy override is present */
    public boolean hasExecutionIsolationStrategy() {
        return executionIsolationStrategy.isPresent();
    }

    /** @return the isolation-strategy override, or {@code null} when absent */
    public HystrixCommandProperties.ExecutionIsolationStrategy getExecutionIsolationStrategy() {
        return executionIsolationStrategy.orNull();
    }

    public void setExecutionIsolationStrategy(HystrixCommandProperties.ExecutionIsolationStrategy executionIsolationStrategy) {
        this.executionIsolationStrategy = Optional.fromNullable(executionIsolationStrategy);
    }

    @Override
    public int hashCode() {
        return Objects.hash(threadpool, circuitBreaker, semaphore,
            executionIsolationThreadTimeoutInMillis, executionIsolationStrategy);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final TenacityConfiguration other = (TenacityConfiguration) obj;
        // Fix: the timeout is a primitive int; compare with == instead of
        // Objects.equals, which autoboxed both sides for no benefit.
        return Objects.equals(this.threadpool, other.threadpool)
            && Objects.equals(this.circuitBreaker, other.circuitBreaker)
            && Objects.equals(this.semaphore, other.semaphore)
            && this.executionIsolationThreadTimeoutInMillis == other.executionIsolationThreadTimeoutInMillis
            && Objects.equals(this.executionIsolationStrategy, other.executionIsolationStrategy);
    }

    @Override
    public String toString() {
        return "TenacityConfiguration{" +
            "threadpool=" + threadpool +
            ", circuitBreaker=" + circuitBreaker +
            ", semaphore=" + semaphore +
            ", executionIsolationThreadTimeoutInMillis=" + executionIsolationThreadTimeoutInMillis +
            ", executionIsolationStrategy=" + executionIsolationStrategy +
            '}';
    }
}
package pers.yue.test.performance.test;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import pers.yue.test.demo.performance.client.DemoClient;
import pers.yue.test.demo.performance.runner.DemoDeleteRunner;
import pers.yue.test.demo.performance.runner.DemoGetRunner;
import pers.yue.test.demo.performance.runner.DemoPutRunner;
import pers.yue.test.performance.PerfTestCore;
import pers.yue.test.performance.config.Filter;
import pers.yue.test.performance.config.PercentFilter;
import pers.yue.test.performance.data.DataGenerator;
import pers.yue.test.performance.data.PoolDataGenerator;
import pers.yue.test.performance.stat.StatInfo;
import pers.yue.common.util.ThreadUtil;

import static pers.yue.common.util.FileUtil.MB;
import static pers.yue.common.util.PropertiesUtil.parseAlternativeSizeProperty;
import static pers.yue.common.util.PropertiesUtil.parseProperty;

/**
 * Demo test class of implementing performance test tool with the performance test core module.
 *
 * Created by Zhang Yue on 6/8/2019
 */
public class DemoPerfTest extends DemoTestBase {
    private static Logger logger = LoggerFactory.getLogger(ThreadUtil.getClassName());

    /**
     * The PerfTestBase instance which holds all the properties that are required by performance test.
     */
    private PerfTestCore<String> perfTest = new PerfTestCore<>();

    // Built-in defaults, overridable via the system properties below.
    private static final long DEFAULT_SIZE_MIN = 1L;
    private static final long DEFAULT_SIZE_MAX = 2 * MB;
    private static final int DEFAULT_DATA_POOL_SIZE = 10;

    // System property names: -DsizeMin / -DsizeMax (or the shared -Dsize) and -DdataPoolSize.
    private static final String SIZE_MIN_PROPERTY_NAME = "sizeMin";
    private static final String SIZE_MAX_PROPERTY_NAME = "sizeMax";
    private static final String SIZE_PROPERTY_NAME = "size";
    private static final String DATA_POOL_SIZE_PROPERTY_NAME = "dataPoolSize";

    /**
     * Minimum size of test data.
     */
    private long sizeMin = DEFAULT_SIZE_MIN;

    /**
     * Maximum size of test data.
     */
    private long sizeMax = DEFAULT_SIZE_MAX;

    /**
     * Size of test data pool. Each file in this pool is unique.
     */
    private int dataPoolSize = DEFAULT_DATA_POOL_SIZE;

    /**
     * Parses the size-related system properties (falling back to the shared
     * "size" property, then to the defaults) before any test runs.
     * Parse failures are logged and rethrown so the suite aborts early.
     * NOTE(review): dataPoolSize is parsed but not echoed in the banner log —
     * confirm whether that is intentional.
     */
    @BeforeClass(alwaysRun = true)
    public void setupDemoPerfTest() {
        String sizeMinProperty = System.getProperty(SIZE_MIN_PROPERTY_NAME);
        String sizeMaxProperty = System.getProperty(SIZE_MAX_PROPERTY_NAME);
        String sizeProperty = System.getProperty(SIZE_PROPERTY_NAME);
        String dataPoolSizeProperty = System.getProperty(DATA_POOL_SIZE_PROPERTY_NAME);
        try {
            sizeMin = parseAlternativeSizeProperty(sizeMin, sizeMinProperty, sizeProperty);
            sizeMax = parseAlternativeSizeProperty(sizeMax, sizeMaxProperty, sizeProperty);
            dataPoolSize = parseProperty(dataPoolSize, dataPoolSizeProperty);
        } catch (RuntimeException e) {
            logger.error("Exception when parsing system property.", e);
            throw e;
        }
        logger.info("======== sizeMin: {} ========", sizeMin);
        logger.info("======== sizeMax: {} ========", sizeMax);
    }

    /**
     * PUT workload: generates pooled random data in [sizeMin, sizeMax],
     * persists the data store for later GET/verify runs, and asserts the stats.
     */
    @Test
    public void runPerfPut() {
        DataGenerator dataGenerator = new PoolDataGenerator(sizeMin, sizeMax, dataPoolSize);
        StatInfo resultStat = perfTest.launchRunner(new DemoPutRunner(new DemoClient(endpoint), dataGenerator, perfTest.getConfig()));
        perfTest.getDataStore().persist();
        perfTest.assertResult(resultStat);
    }

    /**
     * GET workload over a 10% sample of the previously stored keys.
     */
    @Test
    public void runPerfGet() {
//        StatInfo resultStat = perfTest.launchRunner(new DemoGetRunner(new DemoClient(endpoint), perfTest.getConfig().withKeys()));
        Filter filter = new PercentFilter<>(perfTest.getConfig().withKeyList().getKeys(), 10);
        StatInfo resultStat = perfTest.launchRunner(new DemoGetRunner(new DemoClient(endpoint), perfTest.getConfig().withKeyList().withFilter(filter)));
        perfTest.assertResult(resultStat);
    }

    /**
     * GET workload that additionally verifies the downloaded content against
     * the persisted data store.
     */
    @Test
    public void runPerfGetVerifyData() {
        StatInfo resultStat = perfTest.launchRunner(new DemoGetRunner(new DemoClient(endpoint), perfTest.getConfig().withDataStore()));
        perfTest.assertResult(resultStat);
    }

    /**
     * DELETE workload; persists the (now reduced) data store and asserts the stats.
     */
    @Test
    public void runPerfDelete() {
        StatInfo resultStat = perfTest.launchRunner(new DemoDeleteRunner(new DemoClient(endpoint), perfTest.getConfig()));
        perfTest.getDataStore().persist();
        perfTest.assertResult(resultStat);
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.messaging.eventhubs; import com.azure.core.amqp.AmqpEndpointState; import com.azure.core.amqp.AmqpRetryMode; import com.azure.core.amqp.AmqpRetryOptions; import com.azure.core.amqp.AmqpTransportType; import com.azure.core.amqp.ProxyOptions; import com.azure.core.amqp.exception.AmqpErrorCondition; import com.azure.core.amqp.exception.AmqpErrorContext; import com.azure.core.amqp.exception.AmqpException; import com.azure.core.amqp.implementation.AmqpSendLink; import com.azure.core.amqp.implementation.CbsAuthorizationType; import com.azure.core.amqp.implementation.ConnectionOptions; import com.azure.core.amqp.implementation.MessageSerializer; import com.azure.core.amqp.implementation.TracerProvider; import com.azure.core.credential.TokenCredential; import com.azure.core.util.ClientOptions; import com.azure.core.util.Context; import com.azure.core.util.logging.ClientLogger; import com.azure.core.util.tracing.ProcessKind; import com.azure.core.util.tracing.Tracer; import com.azure.messaging.eventhubs.implementation.ClientConstants; import com.azure.messaging.eventhubs.implementation.EventHubAmqpConnection; import com.azure.messaging.eventhubs.implementation.EventHubConnectionProcessor; import com.azure.messaging.eventhubs.models.CreateBatchOptions; import com.azure.messaging.eventhubs.models.SendOptions; import org.apache.qpid.proton.amqp.messaging.Section; import org.apache.qpid.proton.engine.SslDomain; import org.apache.qpid.proton.message.Message; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; import org.mockito.Mockito; import 
org.mockito.MockitoAnnotations; import reactor.core.publisher.DirectProcessor; import reactor.core.publisher.Flux; import reactor.core.publisher.FluxSink; import reactor.core.publisher.Mono; import reactor.core.scheduler.Scheduler; import reactor.core.scheduler.Schedulers; import reactor.test.StepVerifier; import java.time.Duration; import java.time.Instant; import java.util.Collections; import java.util.List; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; import static com.azure.core.util.tracing.Tracer.DIAGNOSTIC_ID_KEY; import static com.azure.core.util.tracing.Tracer.ENTITY_PATH_KEY; import static com.azure.core.util.tracing.Tracer.HOST_NAME_KEY; import static com.azure.core.util.tracing.Tracer.PARENT_SPAN_KEY; import static com.azure.core.util.tracing.Tracer.SPAN_BUILDER_KEY; import static com.azure.messaging.eventhubs.implementation.ClientConstants.AZ_NAMESPACE_VALUE; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; class EventHubProducerAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String HOSTNAME = "my-host-name"; private static final String EVENT_HUB_NAME = "my-event-hub-name"; private static final String ENTITY_PATH = 
HOSTNAME + ".servicebus.windows.net"; @Mock private AmqpSendLink sendLink; @Mock private AmqpSendLink sendLink2; @Mock private AmqpSendLink sendLink3; @Mock private EventHubAmqpConnection connection; @Mock private EventHubAmqpConnection connection2; @Mock private EventHubAmqpConnection connection3; @Mock private TokenCredential tokenCredential; @Mock private Runnable onClientClosed; @Captor private ArgumentCaptor<Message> singleMessageCaptor; @Captor private ArgumentCaptor<List<Message>> messagesCaptor; private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final MessageSerializer messageSerializer = new EventHubMessageSerializer(); private final AmqpRetryOptions retryOptions = new AmqpRetryOptions() .setDelay(Duration.ofMillis(500)) .setMode(AmqpRetryMode.FIXED) .setTryTimeout(Duration.ofSeconds(10)); private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private EventHubProducerAsyncClient producer; private EventHubConnectionProcessor connectionProcessor; private TracerProvider tracerProvider; private ConnectionOptions connectionOptions; private final Scheduler testScheduler = Schedulers.newElastic("test"); @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { MockitoAnnotations.initMocks(this); tracerProvider = new TracerProvider(Collections.emptyList()); connectionOptions = new ConnectionOptions(HOSTNAME, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP_WEB_SOCKETS, retryOptions, ProxyOptions.SYSTEM_DEFAULTS, testScheduler, CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); 
endpointSink.next(AmqpEndpointState.ACTIVE); connectionProcessor = Mono.fromCallable(() -> connection).repeat(10).subscribeWith( new EventHubConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), "event-hub-path", connectionOptions.getRetry())); producer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, testScheduler, false, onClientClosed); when(sendLink.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); when(sendLink2.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); when(sendLink3.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); } @AfterEach void teardown(TestInfo testInfo) { testScheduler.dispose(); Mockito.framework().clearInlineMocks(); Mockito.reset(sendLink, connection); singleMessageCaptor = null; messagesCaptor = null; } /** * Verifies that sending multiple events will result in calling producer.send(List&lt;Message&gt;). */ @Test void sendMultipleMessages() { // Arrange final int count = 4; final byte[] contents = TEST_CONTENTS.getBytes(UTF_8); final Flux<EventData> testData = Flux.range(0, count).flatMap(number -> { final EventData data = new EventData(contents); return Flux.just(data); }); final SendOptions options = new SendOptions(); // EC is the prefix they use when creating a link that sends to the service round-robin. 
when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); // Act StepVerifier.create(producer.send(testData, options)) .verifyComplete(); // Assert verify(sendLink).send(messagesCaptor.capture()); final List<Message> messagesSent = messagesCaptor.getValue(); Assertions.assertEquals(count, messagesSent.size()); messagesSent.forEach(message -> Assertions.assertEquals(Section.SectionType.Data, message.getBody().getType())); } /** * Verifies that sending a single event data will result in calling producer.send(Message). */ @Test void sendSingleMessage() { // Arrange final EventData testData = new EventData(TEST_CONTENTS.getBytes(UTF_8)); final SendOptions options = new SendOptions(); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(any(Message.class))).thenReturn(Mono.empty()); // Act StepVerifier.create(producer.send(testData, options)) .verifyComplete(); // Assert verify(sendLink, times(1)).send(any(Message.class)); verify(sendLink).send(singleMessageCaptor.capture()); final Message message = singleMessageCaptor.getValue(); Assertions.assertEquals(Section.SectionType.Data, message.getBody().getType()); } /** * Verifies that sending a single event data will not throw an {@link IllegalStateException} if we block because * we are publishing on an elastic scheduler. 
*/ @Test void sendSingleMessageWithBlock() throws InterruptedException { // Arrange final Mono<Instant> saveAction = Mono.delay(Duration.ofMillis(500)) .then(Mono.fromCallable(() -> { logger.info("This is saved."); return Instant.now(); })); final EventData testData = new EventData(TEST_CONTENTS.getBytes(UTF_8)); final SendOptions options = new SendOptions(); final Semaphore semaphore = new Semaphore(1); // In our actual client builder, we allow this. final EventHubProducerAsyncClient flexibleProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, testScheduler, false, onClientClosed); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(any(Message.class))).thenReturn(Mono.<Void>empty().publishOn(Schedulers.single())); Assertions.assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS)); // Act final Mono<Instant> sendMono = flexibleProducer.send(testData, options).thenReturn(Instant.now()); sendMono.subscribe(e -> { logger.info("Saving message: {}", e); // This block here should throw an IllegalStateException if we aren't publishing correctly. final Instant result = saveAction.block(Duration.ofSeconds(3)); Assertions.assertNotNull(result); logger.info("Message saved: {}", result); semaphore.release(); }); // Assert Assertions.assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS)); verify(sendLink).send(any(Message.class)); verify(sendLink).send(singleMessageCaptor.capture()); final Message message = singleMessageCaptor.getValue(); Assertions.assertEquals(Section.SectionType.Data, message.getBody().getType()); verifyZeroInteractions(onClientClosed); } /** * Verifies that a partitioned producer cannot also send events with a partition key. 
*/ @Test void partitionProducerCannotSendWithPartitionKey() { // Arrange final Flux<EventData> testData = Flux.just( new EventData(TEST_CONTENTS.getBytes(UTF_8)), new EventData(TEST_CONTENTS.getBytes(UTF_8))); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); final SendOptions options = new SendOptions() .setPartitionKey("Some partition key") .setPartitionId("my-partition-id"); // Act & Assert StepVerifier.create(producer.send(testData, options)) .expectError(IllegalArgumentException.class) .verify(Duration.ofSeconds(10)); verifyZeroInteractions(sendLink); } /** * Verifies start and end span invoked when sending a single message. */ @Test void sendStartSpanSingleMessage() { // Arrange final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); final Flux<EventData> testData = Flux.just( new EventData(TEST_CONTENTS.getBytes(UTF_8)), new EventData(TEST_CONTENTS.getBytes(UTF_8))); final String partitionId = "my-partition-id"; final SendOptions sendOptions = new SendOptions() .setPartitionId(partitionId); final EventHubProducerAsyncClient asyncProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); when(connection.createSendLink( argThat(name -> name.endsWith(partitionId)), argThat(name -> name.endsWith(partitionId)), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); when(tracer1.start(eq("EventHubs.send"), any(), eq(ProcessKind.SEND))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); 
assertEquals(passed.getData(AZ_TRACING_NAMESPACE_KEY).get(), AZ_NAMESPACE_VALUE); return passed.addData(PARENT_SPAN_KEY, "value"); } ); when(tracer1.start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertEquals(passed.getData(AZ_TRACING_NAMESPACE_KEY).get(), AZ_NAMESPACE_VALUE); return passed.addData(PARENT_SPAN_KEY, "value").addData(DIAGNOSTIC_ID_KEY, "value2"); } ); when(tracer1.getSharedSpanBuilder(eq("EventHubs.send"), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_BUILDER_KEY, "value"); } ); // Act StepVerifier.create(asyncProducer.send(testData, sendOptions)) .verifyComplete(); // Assert verify(tracer1, times(1)) .start(eq("EventHubs.send"), any(), eq(ProcessKind.SEND)); verify(tracer1, times(2)) .start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE)); verify(tracer1, times(3)).end(eq("success"), isNull(), any()); verifyZeroInteractions(onClientClosed); } /** * Verifies send, message and addLink spans are only invoked once even for multiple retry attempts to send the * message. 
*/ @Test void sendMessageRetrySpanTest() { //Arrange final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); producer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); final String failureKey = "fail"; final EventData testData = new EventData("test"); testData.getProperties().put(failureKey, "true"); when(tracer1.start(eq("EventHubs.send"), any(), eq(ProcessKind.SEND))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertEquals(passed.getData(AZ_TRACING_NAMESPACE_KEY).get(), AZ_NAMESPACE_VALUE); return passed.addData(PARENT_SPAN_KEY, "value").addData(HOST_NAME_KEY, "value2"); } ); when(tracer1.start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertEquals(passed.getData(AZ_TRACING_NAMESPACE_KEY).get(), AZ_NAMESPACE_VALUE); return passed.addData(PARENT_SPAN_KEY, "value").addData(DIAGNOSTIC_ID_KEY, "value2"); } ); when(tracer1.getSharedSpanBuilder(eq("EventHubs.send"), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_BUILDER_KEY, "value"); } ); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); final Throwable error = new AmqpException(true, AmqpErrorCondition.SERVER_BUSY_ERROR, "Test-message", new AmqpErrorContext("test-namespace")); // Send a transient error to attempt retry. 
when(sendLink.send(argThat((Message message) -> message.getApplicationProperties().getValue().containsKey(failureKey)))) .thenReturn(Mono.error(error)) .thenReturn(Mono.error(error)) .thenReturn(Mono.empty()); StepVerifier.create(producer.send(testData)).verifyComplete(); //Assert verify(tracer1, times(1)) .start(eq("EventHubs.send"), any(), eq(ProcessKind.SEND)); verify(tracer1, times(1)) .start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE)); verify(tracer1, times(1)).addLink(any()); verify(tracer1, times(2)).end(eq("success"), isNull(), any()); verifyZeroInteractions(onClientClosed); } /** * Verifies that it fails if we try to send multiple messages that cannot fit in a single message batch. */ @Test void sendTooManyMessages() { // Arrange int maxLinkSize = 1024; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // We believe 20 events is enough for that EventDataBatch to be greater than max size. final Flux<EventData> testData = Flux.range(0, 20).flatMap(number -> { final EventData data = new EventData(TEST_CONTENTS.getBytes(UTF_8)); return Flux.just(data); }); // Act & Assert StepVerifier.create(producer.send(testData)) .verifyErrorMatches(error -> error instanceof AmqpException && ((AmqpException) error).getErrorCondition() == AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED); verify(link, times(0)).send(any(Message.class)); } /** * Verifies that the producer can create an {@link EventDataBatch} with the size given by the underlying AMQP send * link. */ @Test void createsEventDataBatch() { // Arrange int maxLinkSize = 1024; // Overhead when serializing an event, to figure out what the maximum size we can use for an event payload. 
int eventOverhead = 24; int maxEventPayload = maxLinkSize - eventOverhead; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // This event is 1024 bytes when serialized. final EventData event = new EventData(new byte[maxEventPayload]); // This event will be 1025 bytes when serialized. final EventData tooLargeEvent = new EventData(new byte[maxEventPayload + 1]); // Act & Assert StepVerifier.create(producer.createBatch()) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertTrue(batch.tryAdd(event)); }) .verifyComplete(); StepVerifier.create(producer.createBatch()) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertFalse(batch.tryAdd(tooLargeEvent)); }) .verifyComplete(); verify(link, times(2)).getLinkSize(); } /** * Verifies that message spans are started and ended on tryAdd when creating batches to send in {@link * EventDataBatch}. */ @Test void startMessageSpansOnCreateBatch() { // Arrange final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); final EventHubProducerAsyncClient asyncProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); when(link.getHostname()).thenReturn(HOSTNAME); when(link.getEntityPath()).thenReturn(ENTITY_PATH); // EC is the prefix they use when creating a link that sends to the service round-robin. 
when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); when(tracer1.start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertEquals(passed.getData(AZ_TRACING_NAMESPACE_KEY).get(), AZ_NAMESPACE_VALUE); assertEquals(passed.getData(ENTITY_PATH_KEY).get(), ENTITY_PATH); assertEquals(passed.getData(HOST_NAME_KEY).get(), HOSTNAME); return passed.addData(PARENT_SPAN_KEY, "value").addData(DIAGNOSTIC_ID_KEY, "value2"); } ); // Act & Assert StepVerifier.create(asyncProducer.createBatch()) .assertNext(batch -> { Assertions.assertTrue(batch.tryAdd(new EventData("Hello World".getBytes(UTF_8)))); }) .verifyComplete(); verify(tracer1, times(1)) .start(eq("EventHubs.message"), any(), eq(ProcessKind.MESSAGE)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); verifyZeroInteractions(onClientClosed); } /** * Verifies we can create an EventDataBatch with partition key and link size. */ @Test void createsEventDataBatchWithPartitionKey() { // Arrange int maxLinkSize = 1024; // No idea what the overhead for adding partition key is. But we know this will be smaller than the max size. int eventPayload = maxLinkSize - 100; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // This event is 1024 bytes when serialized. 
final EventData event = new EventData(new byte[eventPayload]); final CreateBatchOptions options = new CreateBatchOptions().setPartitionKey("some-key"); // Act & Assert StepVerifier.create(producer.createBatch(options)) .assertNext(batch -> { Assertions.assertEquals(options.getPartitionKey(), batch.getPartitionKey()); Assertions.assertTrue(batch.tryAdd(event)); }) .verifyComplete(); } /** * Verifies we cannot create an EventDataBatch if the BatchOptions size is larger than the link. */ @Test void createEventDataBatchWhenMaxSizeIsTooBig() { // Arrange int maxLinkSize = 1024; int batchSize = maxLinkSize + 10; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // This event is 1024 bytes when serialized. final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize); // Act & Assert StepVerifier.create(producer.createBatch(options)) .expectError(IllegalArgumentException.class) .verify(); } /** * Verifies that the producer can create an {@link EventDataBatch} with a given {@link * CreateBatchOptions#getMaximumSizeInBytes()}. */ @Test void createsEventDataBatchWithSize() { // Arrange int maxLinkSize = 10000; int batchSize = 1024; // Overhead when serializing an event, to figure out what the maximum size we can use for an event payload. int eventOverhead = 24; int maxEventPayload = batchSize - eventOverhead; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // This event is 1024 bytes when serialized. 
final EventData event = new EventData(new byte[maxEventPayload]); // This event will be 1025 bytes when serialized. final EventData tooLargeEvent = new EventData(new byte[maxEventPayload + 1]); final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize); // Act & Assert StepVerifier.create(producer.createBatch(options)) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertTrue(batch.tryAdd(event)); }) .verifyComplete(); StepVerifier.create(producer.createBatch(options)) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertFalse(batch.tryAdd(tooLargeEvent)); }) .verifyComplete(); } @Test void sendEventRequired() { // Arrange final EventData event = new EventData("Event-data"); final SendOptions sendOptions = new SendOptions(); StepVerifier.create(producer.send(event, null)) .verifyError(NullPointerException.class); StepVerifier.create(producer.send((EventData) null, sendOptions)) .verifyError(NullPointerException.class); } @Test void sendEventIterableRequired() { // Arrange final List<EventData> event = Collections.singletonList(new EventData("Event-data")); final SendOptions sendOptions = new SendOptions(); StepVerifier.create(producer.send(event, null)) .verifyError(NullPointerException.class); StepVerifier.create(producer.send((Iterable<EventData>) null, sendOptions)) .verifyError(NullPointerException.class); } @Test void sendEventFluxRequired() { // Arrange final Flux<EventData> event = Flux.just(new EventData("Event-data")); final SendOptions sendOptions = new SendOptions(); StepVerifier.create(producer.send(event, null)) .verifyError(NullPointerException.class); StepVerifier.create(producer.send((Flux<EventData>) null, sendOptions)) .verifyError(NullPointerException.class); } @Test void batchOptionsIsCloned() { // Arrange int maxLinkSize = 1024; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // 
EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); final String originalKey = "some-key"; final CreateBatchOptions options = new CreateBatchOptions().setPartitionKey(originalKey); // Act & Assert StepVerifier.create(producer.createBatch(options)) .assertNext(batch -> { options.setPartitionKey("something-else"); Assertions.assertEquals(originalKey, batch.getPartitionKey()); }) .verifyComplete(); } @Test void sendsAnEventDataBatch() { // Arrange int maxLinkSize = 1024; // Overhead when serializing an event, to figure out what the maximum size we can use for an event payload. int eventOverhead = 24; int maxEventPayload = maxLinkSize - eventOverhead; final AmqpSendLink link = mock(AmqpSendLink.class); when(link.getLinkSize()).thenReturn(Mono.just(maxLinkSize)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(link)); // This event is 1024 bytes when serialized. final EventData event = new EventData(new byte[maxEventPayload]); // This event will be 1025 bytes when serialized. final EventData tooLargeEvent = new EventData(new byte[maxEventPayload + 1]); // Act & Assert StepVerifier.create(producer.createBatch()) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertTrue(batch.tryAdd(event)); }) .verifyComplete(); StepVerifier.create(producer.createBatch()) .assertNext(batch -> { Assertions.assertNull(batch.getPartitionKey()); Assertions.assertFalse(batch.tryAdd(tooLargeEvent)); }) .verifyComplete(); verify(link, times(2)).getLinkSize(); } /** * Verify we can send messages to multiple partitionIds with same sender. 
*/ @Test void sendMultiplePartitions() { // Arrange final int count = 4; final byte[] contents = TEST_CONTENTS.getBytes(UTF_8); final Flux<EventData> testData = Flux.range(0, count).flatMap(number -> { final EventData data = new EventData(contents); return Flux.just(data); }); final String partitionId1 = "my-partition-id"; final String partitionId2 = "my-partition-id-2"; when(sendLink2.send(anyList())).thenReturn(Mono.empty()); when(sendLink2.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); when(sendLink3.send(anyList())).thenReturn(Mono.empty()); when(sendLink3.getLinkSize()).thenReturn(Mono.just(ClientConstants.MAX_MESSAGE_LENGTH_BYTES)); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(anyString(), anyString(), any())).thenAnswer(mock -> { final String entityPath = mock.getArgument(1, String.class); if (EVENT_HUB_NAME.equals(entityPath)) { return Mono.just(sendLink); } else if (entityPath.endsWith(partitionId1)) { return Mono.just(sendLink3); } else if (entityPath.endsWith(partitionId2)) { return Mono.just(sendLink2); } else { return Mono.error(new IllegalArgumentException("Could not figure out entityPath: " + entityPath)); } }); when(sendLink.send(anyList())).thenReturn(Mono.empty()); // Act StepVerifier.create(producer.send(testData, new SendOptions())) .verifyComplete(); StepVerifier.create(producer.send(testData, new SendOptions().setPartitionId(partitionId1))) .verifyComplete(); StepVerifier.create(producer.send(testData, new SendOptions().setPartitionId(partitionId2))) .verifyComplete(); // Assert verify(sendLink).send(messagesCaptor.capture()); final List<Message> messagesSent = messagesCaptor.getValue(); Assertions.assertEquals(count, messagesSent.size()); verify(sendLink3, times(1)).send(anyList()); verify(sendLink2, times(1)).send(anyList()); } /** * Verifies that when we have a shared connection, the producer does not close that connection. 
*/ @Test void doesNotCloseSharedConnection() { // Arrange EventHubConnectionProcessor hubConnection = mock(EventHubConnectionProcessor.class); EventHubProducerAsyncClient sharedProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, hubConnection, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), true, onClientClosed); // Act sharedProducer.close(); // Verify verify(hubConnection, never()).dispose(); verify(onClientClosed).run(); } /** * Verifies that when we have a non-shared connection, the producer closes that connection. */ @Test void closesDedicatedConnection() { // Arrange EventHubConnectionProcessor hubConnection = mock(EventHubConnectionProcessor.class); EventHubProducerAsyncClient dedicatedProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, hubConnection, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); // Act dedicatedProducer.close(); // Verify verify(hubConnection, times(1)).dispose(); verifyZeroInteractions(onClientClosed); } /** * Verifies that when we have a non-shared connection, the producer closes that connection. Only once. */ @Test void closesDedicatedConnectionOnlyOnce() { // Arrange EventHubConnectionProcessor hubConnection = mock(EventHubConnectionProcessor.class); EventHubProducerAsyncClient dedicatedProducer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, hubConnection, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); // Act dedicatedProducer.close(); dedicatedProducer.close(); // Verify verify(hubConnection, times(1)).dispose(); verifyZeroInteractions(onClientClosed); } /** * Verifies that another link is received and we can continue publishing events on a transient failure. 
*/ @Test void reopensOnFailure() { // Arrange when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); EventHubAmqpConnection[] connections = new EventHubAmqpConnection[]{ connection, connection2, connection3 }; connectionProcessor = Flux.<EventHubAmqpConnection>create(sink -> { final AtomicInteger count = new AtomicInteger(); sink.onRequest(request -> { for (int i = 0; i < request; i++) { final int current = count.getAndIncrement(); final int index = current % connections.length; sink.next(connections[index]); } }); }).subscribeWith( new EventHubConnectionProcessor(EVENT_HUB_NAME, connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); producer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); final int count = 4; final byte[] contents = TEST_CONTENTS.getBytes(UTF_8); final Flux<EventData> testData = Flux.range(0, count).flatMap(number -> { final EventData data = new EventData(contents); return Flux.just(data); }); final EventData testData2 = new EventData("test"); // EC is the prefix they use when creating a link that sends to the service round-robin. 
when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); final DirectProcessor<AmqpEndpointState> connectionState2 = DirectProcessor.create(); when(connection2.getEndpointStates()).thenReturn(connectionState2); when(connection2.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink2)); when(sendLink2.send(any(Message.class))).thenReturn(Mono.empty()); final DirectProcessor<AmqpEndpointState> connectionState3 = DirectProcessor.create(); when(connection3.getEndpointStates()).thenReturn(connectionState3); when(connection3.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink3)); when(sendLink3.send(anyList())).thenReturn(Mono.empty()); // Act StepVerifier.create(producer.send(testData)) .verifyComplete(); // Send in an error signal like a server busy condition. endpointSink.error(new AmqpException(true, AmqpErrorCondition.SERVER_BUSY_ERROR, "Test-message", new AmqpErrorContext("test-namespace"))); StepVerifier.create(producer.send(testData2)) .verifyComplete(); // Assert verify(sendLink).send(messagesCaptor.capture()); final List<Message> messagesSent = messagesCaptor.getValue(); Assertions.assertEquals(count, messagesSent.size()); verify(sendLink2, times(1)).send(any(Message.class)); verifyZeroInteractions(sendLink3); verifyZeroInteractions(onClientClosed); } /** * Verifies that on a non-transient failure, no more event hub connections are recreated and we can not send events. * An error should be propagated back to us. 
*/ @Test void closesOnNonTransientFailure() { // Arrange when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); EventHubAmqpConnection[] connections = new EventHubAmqpConnection[]{ connection, connection2, connection3 }; connectionProcessor = Flux.<EventHubAmqpConnection>create(sink -> { final AtomicInteger count = new AtomicInteger(); sink.onRequest(request -> { for (int i = 0; i < request; i++) { final int current = count.getAndIncrement(); final int index = current % connections.length; sink.next(connections[index]); } }); }).subscribeWith( new EventHubConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), EVENT_HUB_NAME, connectionOptions.getRetry())); producer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); final int count = 4; final byte[] contents = TEST_CONTENTS.getBytes(UTF_8); final Flux<EventData> testData = Flux.range(0, count).flatMap(number -> { final EventData data = new EventData(contents); return Flux.just(data); }); final EventData testData2 = new EventData("test"); // EC is the prefix they use when creating a link that sends to the service round-robin. 
when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); final DirectProcessor<AmqpEndpointState> connectionState2 = DirectProcessor.create(); when(connection2.getEndpointStates()).thenReturn(connectionState2); when(connection2.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink2)); when(sendLink2.send(any(Message.class))).thenReturn(Mono.empty()); final AmqpException nonTransientError = new AmqpException(false, AmqpErrorCondition.UNAUTHORIZED_ACCESS, "Test unauthorized access", new AmqpErrorContext("test-namespace")); // Act StepVerifier.create(producer.send(testData)) .verifyComplete(); // Send in an error signal like authorization failure. endpointSink.error(nonTransientError); StepVerifier.create(producer.send(testData2)) .expectErrorSatisfies(error -> { Assertions.assertTrue(error instanceof AmqpException); final AmqpException actual = (AmqpException) error; Assertions.assertEquals(nonTransientError.isTransient(), actual.isTransient()); Assertions.assertEquals(nonTransientError.getContext(), actual.getContext()); Assertions.assertEquals(nonTransientError.getErrorCondition(), actual.getErrorCondition()); Assertions.assertEquals(nonTransientError.getMessage(), actual.getMessage()); }) .verify(Duration.ofSeconds(10)); // Assert verify(sendLink).send(messagesCaptor.capture()); final List<Message> messagesSent = messagesCaptor.getValue(); Assertions.assertEquals(count, messagesSent.size()); verifyZeroInteractions(sendLink2); verifyZeroInteractions(sendLink3); verifyZeroInteractions(onClientClosed); } /** * Verifies that we can resend a message when a transient error occurs. 
*/ @Test void resendMessageOnTransientLinkFailure() { // Arrange when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); EventHubAmqpConnection[] connections = new EventHubAmqpConnection[]{connection, connection2}; connectionProcessor = Flux.<EventHubAmqpConnection>create(sink -> { final AtomicInteger count = new AtomicInteger(); sink.onRequest(request -> { for (int i = 0; i < request; i++) { final int current = count.getAndIncrement(); final int index = current % connections.length; sink.next(connections[index]); } }); }).subscribeWith( new EventHubConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), EVENT_HUB_NAME, connectionOptions.getRetry())); producer = new EventHubProducerAsyncClient(HOSTNAME, EVENT_HUB_NAME, connectionProcessor, retryOptions, tracerProvider, messageSerializer, Schedulers.parallel(), false, onClientClosed); final int count = 4; final byte[] contents = TEST_CONTENTS.getBytes(UTF_8); final Flux<EventData> testData = Flux.range(0, count).flatMap(number -> { final EventData data = new EventData(contents); return Flux.just(data); }); final String failureKey = "fail"; final EventData testData2 = new EventData("test"); testData2.getProperties().put(failureKey, "true"); // EC is the prefix they use when creating a link that sends to the service round-robin. when(connection.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink)); when(sendLink.send(anyList())).thenReturn(Mono.empty()); // Send a transient error, and close the original link, if we get a message that contains the "failureKey". // This simulates when a link is closed. 
when(sendLink.send(argThat((Message message) -> { return message.getApplicationProperties().getValue().containsKey(failureKey); }))).thenAnswer(mock -> { final Throwable error = new AmqpException(true, AmqpErrorCondition.SERVER_BUSY_ERROR, "Test-message", new AmqpErrorContext("test-namespace")); endpointSink.error(error); return Mono.error(error); }); final DirectProcessor<AmqpEndpointState> connectionState2 = DirectProcessor.create(); when(connection2.getEndpointStates()).thenReturn(connectionState2); when(connection2.createSendLink(eq(EVENT_HUB_NAME), eq(EVENT_HUB_NAME), eq(retryOptions))) .thenReturn(Mono.just(sendLink2)); when(sendLink2.send(any(Message.class))).thenReturn(Mono.empty()); // Act StepVerifier.create(producer.send(testData)) .verifyComplete(); StepVerifier.create(producer.send(testData2)) .verifyComplete(); // Assert verify(sendLink).send(messagesCaptor.capture()); final List<Message> messagesSent = messagesCaptor.getValue(); Assertions.assertEquals(count, messagesSent.size()); verify(sendLink2, times(1)).send(any(Message.class)); verifyZeroInteractions(sendLink3); verifyZeroInteractions(onClientClosed); } private static final String TEST_CONTENTS = "SSLorem ipsum dolor sit amet, consectetur adipiscing elit. Donec " + "vehicula posuere lobortis. Aliquam finibus volutpat dolor, faucibus pellentesque ipsum bibendum vitae. " + "Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Ut sit amet " + "urna hendrerit, dapibus justo a, sodales justo. Mauris finibus augue id pulvinar congue. Nam maximus " + "luctus ipsum, at commodo ligula euismod ac. Phasellus vitae lacus sit amet diam porta placerat. \n" + "Ut sodales efficitur sapien ut posuere. Morbi sed tellus est. Proin eu erat purus. Proin massa nunc, " + "condimentum id iaculis dignissim, consectetur et odio. Cras suscipit sem eu libero aliquam tincidunt. " + "Nullam ut arcu suscipit, eleifend velit in, cursus libero. Ut eleifend facilisis odio sit amet feugiat. 
" + "Phasellus at nunc sit amet elit sagittis commodo ac in nisi. Fusce vitae aliquam quam. Integer vel nibh " + "euismod, tempus elit vitae, pharetra est. Duis vulputate enim a elementum dignissim. Morbi dictum enim id " + "elit scelerisque, in elementum nulla pharetra. \n" + "Aenean aliquet aliquet condimentum. Proin dapibus dui id libero tempus feugiat. Sed commodo ligula a " + "lectus mattis, vitae tincidunt velit auctor. Fusce quis semper dui. Phasellus eu efficitur sem. Ut non sem" + " sit amet enim condimentum venenatis id dictum massa. Nullam sagittis lacus a neque sodales, et ultrices " + "arcu mattis. Aliquam erat volutpat. \n" + "Aenean fringilla quam elit, id mattis purus vestibulum nec. Praesent porta eros in dapibus molestie. " + "Vestibulum orci libero, tincidunt et turpis eget, condimentum lobortis enim. Fusce suscipit ante et mauris" + " consequat cursus nec laoreet lorem. Maecenas in sollicitudin diam, non tincidunt purus. Nunc mauris " + "purus, laoreet eget interdum vitae, placerat a sapien. In mi risus, blandit eu facilisis nec, molestie " + "suscipit leo. Pellentesque molestie urna vitae dui faucibus bibendum. \n" + "Donec quis ipsum ultricies, imperdiet ex vel, scelerisque eros. Ut at urna arcu. Vestibulum rutrum odio " + "dolor, vitae cursus nunc pulvinar vel. Donec accumsan sapien in malesuada tempor. Maecenas in condimentum " + "eros. Sed vestibulum facilisis massa a iaculis. Etiam et nibh felis. Donec maximus, sem quis vestibulum " + "gravida, turpis risus congue dolor, pharetra tincidunt lectus nisi at velit."; }
//,temp,VPNUserUsageParser.java,58,119,temp,VolumeUsageParser.java,58,129 //,3 public class xxx { public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { s_logger.debug("Parsing all Volume usage events for account: " + account.getId()); } if ((endDate == null) || endDate.after(new Date())) { endDate = new Date(); } // - query usage_volume table with the following criteria: // - look for an entry for accountId with start date in the given range // - look for an entry for accountId with end date in the given range // - look for an entry for accountId with end date null (currently running vm or owned IP) // - look for an entry for accountId with start date before given range *and* end date after given range List<UsageVolumeVO> usageUsageVols = s_usageVolumeDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); if (usageUsageVols.isEmpty()) { s_logger.debug("No volume usage events for this period"); return true; } // This map has both the running time *and* the usage amount. 
Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>(); Map<String, VolInfo> diskOfferingMap = new HashMap<String, VolInfo>(); // loop through all the usage volumes, create a usage record for each for (UsageVolumeVO usageVol : usageUsageVols) { long volId = usageVol.getId(); Long doId = usageVol.getDiskOfferingId(); long zoneId = usageVol.getZoneId(); Long templateId = usageVol.getTemplateId(); long size = usageVol.getSize(); String key = volId + "-" + doId + "-" + size; diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size)); Date volCreateDate = usageVol.getCreated(); Date volDeleteDate = usageVol.getDeleted(); if ((volDeleteDate == null) || volDeleteDate.after(endDate)) { volDeleteDate = endDate; } // clip the start date to the beginning of our aggregation range if the vm has been running for a while if (volCreateDate.before(startDate)) { volCreateDate = startDate; } if (volCreateDate.after(endDate)) { //Ignore records created after endDate continue; } long currentDuration = (volDeleteDate.getTime() - volCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) updateVolUsageData(usageMap, key, usageVol.getId(), currentDuration); } for (String volIdKey : usageMap.keySet()) { Pair<Long, Long> voltimeInfo = usageMap.get(volIdKey); long useTime = voltimeInfo.second().longValue(); // Only create a usage record if we have a runningTime of bigger than zero. if (useTime > 0L) { VolInfo info = diskOfferingMap.get(volIdKey); createUsageRecord(UsageTypes.VOLUME, useTime, startDate, endDate, account, info.getVolumeId(), info.getZoneId(), info.getDiskOfferingId(), info.getTemplateId(), info.getSize()); } } return true; } };
/*
 * MIT License
 *
 * Copyright (c) 2017-2019 nuls.io
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
package io.nuls.contract.vm.natives.java.lang;

import io.nuls.contract.vm.Frame;
import io.nuls.contract.vm.MethodArgs;
import io.nuls.contract.vm.ObjectRef;
import io.nuls.contract.vm.Result;
import io.nuls.contract.vm.code.MethodCode;
import io.nuls.contract.vm.natives.NativeMethod;

import static io.nuls.contract.vm.natives.NativeMethod.NOT_SUPPORT_NATIVE;
import static io.nuls.contract.vm.natives.NativeMethod.SUPPORT_NATIVE;

/**
 * In-VM handlers for {@code java.lang.String} methods executed inside the
 * contract VM. Methods are identified by their JVM descriptor
 * ({@code MethodCode.fullName}) and matched against the {@code static final}
 * String constants declared below, which double as {@code switch} case labels.
 */
public class NativeString {

    // Internal (slash-separated) JVM name of the class these handlers cover.
    public static final String TYPE = "java/lang/String";

    /**
     * Dispatches String methods that this class overrides with a VM-side
     * implementation (rather than delegating to a true native method).
     *
     * @param check when {@code true}, only report whether the method is
     *              supported; when {@code false}, actually execute it
     * @return {@code SUPPORT_NATIVE} (check mode), an execution {@code Result},
     *         or {@code null} when the method is not overridden here
     */
    public static Result override(MethodCode methodCode, MethodArgs methodArgs, Frame frame, boolean check) {
        switch (methodCode.fullName) {
            case getBytes:
                if (check) {
                    return SUPPORT_NATIVE;
                } else {
                    return getBytes(methodCode, methodArgs, frame);
                }
            default:
                // Not handled here; caller falls back to other dispatch paths.
                return null;
        }
    }

    /**
     * Dispatches String methods declared {@code native} in the JDK.
     * Unsupported natives are reported via {@code frame.nonsupportMethod}.
     *
     * @param check when {@code true}, only report support; when {@code false},
     *              execute the native method
     * @return {@code SUPPORT_NATIVE}/{@code NOT_SUPPORT_NATIVE} (check mode),
     *         an execution {@code Result}, or {@code null} after flagging an
     *         unsupported method
     */
    public static Result nativeRun(MethodCode methodCode, MethodArgs methodArgs, Frame frame, boolean check) {
        switch (methodCode.fullName) {
            case intern:
                if (check) {
                    return SUPPORT_NATIVE;
                } else {
                    return intern(methodCode, methodArgs, frame);
                }
            default:
                if (check) {
                    return NOT_SUPPORT_NATIVE;
                } else {
                    frame.nonsupportMethod(methodCode);
                    return null;
                }
        }
    }

    // Descriptor of String.getBytes() returning byte[] — used as a switch label above.
    public static final String getBytes = TYPE + "." + "getBytes" + "()[B";

    /**
     * override
     *
     * Materializes the receiver as a VM string, encodes it, and wraps the
     * resulting byte[] in a new VM array object. Returns a null ref when the
     * receiver or its string value is null.
     *
     * NOTE(review): str.getBytes() uses the JVM default charset; presumably the
     * contract execution environment pins this for determinism — confirm.
     *
     * @see String#getBytes()
     */
    private static Result getBytes(MethodCode methodCode, MethodArgs methodArgs, Frame frame) {
        ObjectRef objectRef = methodArgs.objectRef;
        ObjectRef ref = null;
        if (objectRef != null) {
            String str = frame.heap.runToString(objectRef);
            if (str != null) {
                byte[] bytes = str.getBytes();
                ref = frame.heap.newArray(bytes);
            }
        }
        Result result = NativeMethod.result(methodCode, ref, frame);
        return result;
    }

//    public static final String format = TYPE + "." + "format" + "(Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String;";
//
//    /**
//     * override
//     *
//     * @see String#format(String, Object...)
//     */
//    private static Result format(MethodCode methodCode, MethodArgs methodArgs, Frame frame) {
//        Result result = NativeMethod.result(methodCode, null, frame);
//        return result;
//    }

    // Descriptor of String.intern() — used as a switch label above.
    public static final String intern = TYPE + "." + "intern" + "()Ljava/lang/String;";

    /**
     * native
     *
     * Returns the receiver reference unchanged (the VM does not maintain a
     * separate intern pool; the existing ref stands in for the interned value).
     *
     * @see String#intern()
     */
    private static Result intern(MethodCode methodCode, MethodArgs methodArgs, Frame frame) {
        ObjectRef objectRef = methodArgs.objectRef;
        Result result = NativeMethod.result(methodCode, objectRef, frame);
        return result;
    }
}
/* * * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.gjhealth.http.interceptor; import com.gjhealth.http.utils.HttpLog; import com.gjhealth.http.utils.HttpUtil; import com.gjhealth.http.utils.Utils; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import okhttp3.FormBody; import okhttp3.HttpUrl; import okhttp3.Interceptor; import okhttp3.MultipartBody; import okhttp3.Request; import okhttp3.Response; import static com.gjhealth.http.utils.HttpUtil.UTF8; /** * <p>描述:动态拦截器</p> * 主要功能是针对参数:<br> * 1.可以获取到全局公共参数和局部参数,统一进行签名sign<br> * 2.可以自定义动态添加参数,类似时间戳timestamp是动态变化的,token(登录了才有),参数签名等<br> * 3.参数值是经过UTF-8编码的<br> * 4.默认提供询问是否动态签名(签名需要自定义),动态添加时间戳等<br> * * 日期: 2017/5/3 15:32 <br> * 版本: v1.0<br> */ @SuppressWarnings(value={"unchecked", "deprecation"}) public abstract class BaseDynamicInterceptor<R extends BaseDynamicInterceptor> implements Interceptor { private HttpUrl httpUrl; private boolean isSign = false; //是否需要签名 private boolean timeStamp = false; //是否需要追加时间戳 private boolean accessToken = false; //是否需要添加token public BaseDynamicInterceptor() { } public boolean isSign() { return isSign; } public R sign(boolean sign) { isSign = sign; return (R) this; } public boolean isTimeStamp() { return timeStamp; } public R timeStamp(boolean timeStamp) { 
this.timeStamp = timeStamp; return (R) this; } public R accessToken(boolean accessToken) { this.accessToken = accessToken; return (R) this; } public boolean isAccessToken() { return accessToken; } @Override public Response intercept(Chain chain) throws IOException { Request request = chain.request(); if (request.method().equals("GET")) { this.httpUrl = HttpUrl.parse(parseUrl(request.url().url().toString())); request = addGetParamsSign(request); } else if (request.method().equals("POST")) { this.httpUrl = request.url(); request = addPostParamsSign(request); } return chain.proceed(request); } public HttpUrl getHttpUrl() { return httpUrl; } //get 添加签名和公共动态参数 private Request addGetParamsSign(Request request) throws UnsupportedEncodingException { HttpUrl httpUrl = request.url(); HttpUrl.Builder newBuilder = httpUrl.newBuilder(); //获取原有的参数 Set<String> nameSet = httpUrl.queryParameterNames(); ArrayList<String> nameList = new ArrayList<>(); nameList.addAll(nameSet); TreeMap<String, String> oldparams = new TreeMap<>(); for (int i = 0; i < nameList.size(); i++) { String value = httpUrl.queryParameterValues(nameList.get(i)) != null && httpUrl.queryParameterValues(nameList.get(i)).size() > 0 ? 
httpUrl.queryParameterValues(nameList.get(i)).get(0) : ""; oldparams.put(nameList.get(i), value); } String nameKeys = Collections.singletonList(nameList).toString(); //拼装新的参数 TreeMap<String, String> newParams = dynamic(oldparams); Utils.checkNotNull(newParams, "newParams==null"); for (Map.Entry<String, String> entry : newParams.entrySet()) { String urlValue = URLEncoder.encode(entry.getValue(), UTF8.name()); //原来的URl: https://xxx.xxx.xxx/app/chairdressing/skinAnalyzePower/skinTestResult?appId=10101 if (!nameKeys.contains(entry.getKey())) {//避免重复添加 newBuilder.addQueryParameter(entry.getKey(), urlValue); } } httpUrl = newBuilder.build(); request = request.newBuilder().url(httpUrl).build(); return request; } //post 添加签名和公共动态参数 private Request addPostParamsSign(Request request) throws UnsupportedEncodingException { if (request.body() instanceof FormBody) { FormBody.Builder bodyBuilder = new FormBody.Builder(); FormBody formBody = (FormBody) request.body(); //原有的参数 TreeMap<String, String> oldparams = new TreeMap<>(); for (int i = 0; i < formBody.size(); i++) { oldparams.put(formBody.encodedName(i), formBody.encodedValue(i)); } //拼装新的参数 TreeMap<String, String> newParams = dynamic(oldparams); Utils.checkNotNull(newParams, "newParams==null"); //Logc.i("======post请求参数==========="); for (Map.Entry<String, String> entry : newParams.entrySet()) { String value = URLDecoder.decode(entry.getValue(), UTF8.name()); bodyBuilder.addEncoded(entry.getKey(), value); //Logc.i(entry.getKey() + " -> " + value); } String url = HttpUtil.createUrlFromParams(httpUrl.url().toString(), newParams); HttpLog.i(url); formBody = bodyBuilder.build(); request = request.newBuilder().post(formBody).build(); } else if (request.body() instanceof MultipartBody) { MultipartBody multipartBody = (MultipartBody) request.body(); MultipartBody.Builder bodyBuilder = new MultipartBody.Builder().setType(MultipartBody.FORM); List<MultipartBody.Part> oldparts = multipartBody.parts(); //拼装新的参数 List<MultipartBody.Part> 
newparts = new ArrayList<>(); newparts.addAll(oldparts); TreeMap<String, String> oldparams = new TreeMap<>(); TreeMap<String, String> newParams = dynamic(oldparams); for (Map.Entry<String, String> stringStringEntry : newParams.entrySet()) { MultipartBody.Part part = MultipartBody.Part.createFormData(stringStringEntry.getKey(), stringStringEntry.getValue()); newparts.add(part); } for (MultipartBody.Part part : newparts) { bodyBuilder.addPart(part); } multipartBody = bodyBuilder.build(); request = request.newBuilder().post(multipartBody).build(); } return request; } //解析前:https://xxx.xxx.xxx/app/chairdressing/skinAnalyzePower/skinTestResult?appId=10101 //解析后:https://xxx.xxx.xxx/app/chairdressing/skinAnalyzePower/skinTestResult private String parseUrl(String url) { if (!"".equals(url) && url.contains("?")) {// 如果URL不是空字符串 url = url.substring(0, url.indexOf('?')); } return url; } /** * 动态处理参数 * * @param dynamicMap * @return 返回新的参数集合 */ public abstract TreeMap<String, String> dynamic(TreeMap<String, String> dynamicMap); }
/* * Copyright 1999-2015 dangdang.com. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * </p> */ package com.dangdang.ddframe.rdb.integrate.dbtbl.statically.pstatement; import com.dangdang.ddframe.rdb.integrate.dbtbl.common.pstatement.AbstractShardingBothForPStatementWithAggregateTest; import com.dangdang.ddframe.rdb.integrate.dbtbl.statically.StaticShardingBothHelper; import com.dangdang.ddframe.rdb.sharding.jdbc.core.datasource.ShardingDataSource; import org.junit.AfterClass; public final class StaticShardingBothForPStatementWithAggregateTest extends AbstractShardingBothForPStatementWithAggregateTest { private static ShardingDataSource shardingDataSource; @Override protected ShardingDataSource getShardingDataSource() { if (null != shardingDataSource) { return shardingDataSource; } shardingDataSource = StaticShardingBothHelper.getShardingDataSource(createDataSourceMap("dataSource_%s")); return shardingDataSource; } @AfterClass public static void clear() { shardingDataSource.close(); } }
/* * 版权所有.(c)2008-2017. 卡尔科技工作室 */ package com.carl.wolf.core.foundation.module; import com.carl.wolf.core.bean.Menu; import com.carl.wolf.core.exception.ScanException; import com.carl.wolf.core.util.JSONUtil; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; /** * 默认的菜单扫描路径,通过method进行反射扫描 * * @author Carl * @date 2017/9/16 * @since 1.0.0 */ public class DefaultMenuScanStrategy implements IMenuScanStrategy { private static final Log logger = LogFactory.getLog(DefaultMenuScanStrategy.class); @Override public Menu process(Object target) throws ScanException { Method method = (Method) target; com.carl.wolf.core.annotation.Menu menu = method.getDeclaredAnnotation(com.carl.wolf.core.annotation.Menu.class); Menu menuVo = new Menu(); menuVo.setIcon(menu.icon()) .setOrder(menu.order()) .setPath(menu.path()) .setTitle(menu.title()) .setTarget(target) .setPros(propertiesResolve(menu.pros())); return menuVo; } /** * 根据class反射获取对象放到map * * @param clz * @return */ private Map<String, Object> propertiesResolve(Class[] clz) { Map<String, Object> pros = new HashMap<>(); for (Class c : clz) { try { pros.putAll(JSONUtil.class2Map(c)); } catch (Exception e) { logger.error("", e); } } return pros; } @Override public boolean support(Object target) { return target instanceof Method; } }
package com.yulu.util; import org.apache.ibatis.io.Resources; import org.apache.ibatis.session.SqlSession; import org.apache.ibatis.session.SqlSessionFactory; import org.apache.ibatis.session.SqlSessionFactoryBuilder; import org.springframework.context.ApplicationContext; import org.springframework.context.support.ClassPathXmlApplicationContext; import java.io.IOException; import java.io.InputStream; /** * Mybatis工具 * 1.提供获取SqlSessionFactory方法(工厂唯一) * 2.提供获取SqlSession方法 */ public class MybatisUtil { private static SqlSessionFactory sqlSessionFactory; // 1.类加载的时候首先加载静态代码块,并且只会加载一次,一般用于加载驱动或创建工厂 static { ApplicationContext ac = new ClassPathXmlApplicationContext("applicationContext.xml"); //3.通过建造者来构造SqlSessionFactory sqlSessionFactory = (SqlSessionFactory)ac.getBean("sqlSessionFactory"); } /** * 1.提供获取SqlSessionFactory方法(工厂唯一) */ public static SqlSessionFactory getSqlSessionFactory() { return sqlSessionFactory; } /** * 2.提供获取SqlSession方法 * @param flag true:自动提交session ,false默认手动提交 */ public static SqlSession getSqlSession(boolean flag) { return sqlSessionFactory.openSession(flag); } }
/*
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 */
package org.apache.isis.metamodel.facets.object.bookmarkpolicy;

import org.apache.isis.applib.annotation.BookmarkPolicy;
import org.apache.isis.metamodel.facetapi.FacetHolder;

/**
 * Fallback bookmark-policy facet: fixes the policy for the given holder to
 * {@link BookmarkPolicy#NEVER}.
 */
public class BookmarkPolicyFacetFallback extends BookmarkPolicyFacetAbstract {

    public BookmarkPolicyFacetFallback(FacetHolder facetHolder) {
        super(facetHolder, BookmarkPolicy.NEVER);
    }
}
package com.sallyf.sallyf.Container;

import java.util.ArrayList;

/**
 * Describes how a service is built: its alias and concrete type, an optional
 * configuration reference, explicit constructor/method-call definitions, tags,
 * and whether the container should auto-wire it.
 *
 * @param <T> concrete service type
 */
public class ServiceDefinition<T extends ServiceInterface> implements ServiceRepresentationInterface {

    private Class alias;

    private Class<T> type;

    private ConstructorDefinition constructorDefinition = null;

    private ArrayList<MethodCallDefinition> methodCallDefinitions = new ArrayList<>();

    private ArrayList<String> tags = new ArrayList<>();

    private ReferenceInterface configurationReference = null;

    private boolean autoWire = true;

    /** Auto-wired definition aliased to its own type, with no configuration. */
    public ServiceDefinition(Class<T> type) {
        this(type, type);
    }

    /** Auto-wired definition with an explicit alias and no configuration. */
    public ServiceDefinition(Class alias, Class<T> type) {
        this(alias, type, null);
    }

    /** Auto-wired definition aliased to its own type, with a configuration. */
    public ServiceDefinition(Class<T> type, ConfigurationInterface configuration) {
        this(type, type, configuration);
    }

    /**
     * Auto-wired definition with an explicit alias; a {@code null} configuration
     * falls back to the container default.
     */
    public ServiceDefinition(Class alias, Class<T> type, ConfigurationInterface configuration) {
        this.type = type;
        this.alias = alias;

        ReferenceInterface reference;
        if (configuration == null) {
            reference = new DefaultConfigurationReference();
        } else {
            reference = new PlainReference<>(configuration);
        }
        setConfigurationReference(reference);
        setAutoWire(true);
    }

    /** Manually wired definition aliased to its own type. */
    public ServiceDefinition(Class<T> type, ConfigurationInterface configuration, ConstructorDefinition constructorDefinition, ArrayList<MethodCallDefinition> methodCallDefinitions) {
        this(type, type, configuration, constructorDefinition, methodCallDefinitions);
    }

    /**
     * Manually wired definition: explicit constructor and method calls are
     * supplied, so auto-wiring is turned off.
     */
    public ServiceDefinition(Class alias, Class<T> type, ConfigurationInterface configuration, ConstructorDefinition constructorDefinition, ArrayList<MethodCallDefinition> methodCallDefinitions) {
        this(alias, type, configuration);

        setAutoWire(false);
        setConstructorDefinition(constructorDefinition);
        setMethodCallDefinitions(methodCallDefinitions);
    }

    public boolean isAutoWire() {
        return autoWire;
    }

    public ServiceDefinition<T> setAutoWire(boolean autoWire) {
        this.autoWire = autoWire;

        return this;
    }

    public ReferenceInterface getConfigurationReference() {
        return configurationReference;
    }

    public ServiceDefinition<T> setConfigurationReference(ReferenceInterface configurationReference) {
        this.configurationReference = configurationReference;

        return this;
    }

    public ConstructorDefinition getConstructorDefinition() {
        return constructorDefinition;
    }

    public ServiceDefinition<T> setConstructorDefinition(ConstructorDefinition constructorDefinition) {
        this.constructorDefinition = constructorDefinition;

        return this;
    }

    public ArrayList<MethodCallDefinition> getMethodCallDefinitions() {
        return methodCallDefinitions;
    }

    public ServiceDefinition<T> setMethodCallDefinitions(ArrayList<MethodCallDefinition> methodCallDefinitions) {
        this.methodCallDefinitions = methodCallDefinitions;

        return this;
    }

    public ServiceDefinition<T> addMethodCallDefinitions(MethodCallDefinition methodCallDefinition) {
        this.methodCallDefinitions.add(methodCallDefinition);

        return this;
    }

    public Class getAlias() {
        return alias;
    }

    public Class<T> getType() {
        return type;
    }

    public ArrayList<String> getTags() {
        return tags;
    }

    public ServiceDefinition<T> addTag(String tag) {
        tags.add(tag);

        return this;
    }
}
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.common; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import java.util.HashMap; import java.util.Map; import org.junit.Test; import com.google.common.collect.Lists; public class TestStatsSetupConst { @Test public void testSetBasicStatsState_missesUpgrade() { Map<String, String> params=new HashMap<>(); params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, "FALSE"); StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); assertEquals("{\"BASIC_STATS\":\"true\"}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } @Test public void setColumnStatsState_camelcase() { Map<String, String> params=new HashMap<>(); StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); String val1 = params.get(StatsSetupConst.COLUMN_STATS_ACCURATE); StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); String val2 = params.get(StatsSetupConst.COLUMN_STATS_ACCURATE); assertEquals(val1, val2); } @Test public void testSetBasicStatsState_none() { Map<String, String> params=new HashMap<>(); StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); 
assertEquals("{\"BASIC_STATS\":\"true\"}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } @Test public void testSetBasicStatsState_falseIsAbsent() { Map<String, String> params=new HashMap<>(); StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); StatsSetupConst.setBasicStatsState(params, String.valueOf(false)); assertNull(params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } // earlier implementation have quoted boolean values...so the new implementation should preserve this @Test public void testStatColumnEntriesCompat() { Map<String, String> params0=new HashMap<>(); StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("Foo")); assertEquals("{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"Foo\":\"true\"}}",params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } @Test public void testColumnEntries_orderIndependence() { Map<String, String> params0=new HashMap<>(); StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("Foo","Bar")); Map<String, String> params1=new HashMap<>(); StatsSetupConst.setColumnStatsState(params1, Lists.newArrayList("Bar","Foo")); StatsSetupConst.setBasicStatsState(params1, String.valueOf(true)); assertEquals(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE),params1.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } @Test public void testColumnEntries_orderIndependence2() { Map<String, String> params0=new HashMap<>(); // in case jackson is able to deserialize...it may use a different implementation for the map - which may not preserve order StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("year")); StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("year","month")); Map<String, String> params1=new HashMap<>(); StatsSetupConst.setColumnStatsState(params1, 
Lists.newArrayList("month","year")); StatsSetupConst.setBasicStatsState(params1, String.valueOf(true)); System.out.println(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); assertEquals(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE),params1.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } // FIXME: current objective is to keep the previous outputs...but this is possibly bad.. @Test public void testColumnEntries_areKept_whenBasicIsAbsent() { Map<String, String> params=new HashMap<>(); StatsSetupConst.setBasicStatsState(params, String.valueOf(false)); StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); assertEquals("{\"COLUMN_STATS\":{\"Foo\":\"true\"}}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); } }
import org.apache.curator.RetryPolicy; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.api.CuratorEvent; import org.apache.curator.framework.api.CuratorListener; import org.apache.curator.framework.api.CuratorWatcher; import org.apache.curator.framework.recipes.cache.PathChildrenCache; import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; import org.apache.curator.framework.state.ConnectionState; import org.apache.curator.framework.state.ConnectionStateListener; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.WatchedEvent; import java.io.UnsupportedEncodingException; /** * PathChildrenCacheExample * <p> * PathChildrenCache: * (1)永久监听指定节点下的节点 * (2)只能监听指定节点下一级节点的变化,比如说指定节点”/example”, 在下面添加”node1”可以监听到,但是添加”node1/n1”就不能被监听到了 * (3)可以监听到的事件:节点创建、节点数据的变化、节点删除等 * <p> * 监听指定节点的子节点的变更包括添加,删除,子节点数据数据变更这三类。 * * @author xiuyuhang [xiuyuhang] * @since 2018-03-20 */ public class PathChildrenCacheExample { public static void main(String[] args) throws Exception { CuratorFramework client = getClient(); client.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework client, ConnectionState newState) { System.out.println("Zookeeper connection state changed " + newState); } }); // String parentPath = "/soar"; String path = "/soar/service1"; String pathChild = "/soar/service1/127.0.0.1"; PathChildrenCache pathChildrenCache = new PathChildrenCache(client, path, false); //guaranteed保证节点被删除 // client.delete().guaranteed().forPath(parentPath); //checkExists 如果返回数据 说明节点存在 为null则不存在 // if (client.checkExists().forPath(parentPath) == null) { // client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(parentPath); // } if 
(client.checkExists().forPath(path) == null) { client.create().creatingParentsIfNeeded().forPath(path); client.setData().forPath(path, "test".getBytes("utf-8")); } Thread.sleep(1000); // System.out.println(client.getChildren().usingWatcher(new CuratorWatcher() { // // @Override // public void process(WatchedEvent event) throws Exception { // System.out.println(event.getPath()); // System.out.println(event.getType()); // System.out.println(event.getState()); // } // //这里 /soar是父节点,service1是子节点. // }).forPath("/soar")); // Thread.sleep(1000); // // //过一会会自动断掉 会删除这个子节点 // client.create().withMode(CreateMode.EPHEMERAL).forPath(path); // Thread.sleep(1000); // 此处需留意,如果没有现成睡眠则无法触发监听事件 主要应该是因为是异步的原因 // client.delete().forPath(path); //节点设置数据同样会触发监听器 // client.setData().forPath(path, "test data".getBytes("utf-8")); // Thread.sleep(2000); pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() { @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { System.out.println("事件类型:" + event.getType() + ";操作节点:" + event.getData().getPath()); System.out.println(new String(event.getData().getData())); } }); pathChildrenCache.start(); // if (client.checkExists().forPath(pathChild) == null) { // client.create().creatingParentsIfNeeded().forPath(pathChild); // } else { // client.delete().guaranteed().forPath(pathChild); // } Thread.sleep(5000); } public static CuratorFramework getClient() throws UnsupportedEncodingException { RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); CuratorFramework client = CuratorFrameworkFactory.builder() .authorization("digest", "1xife@F1FXX".getBytes("utf-8")) .connectString("10.165.124.48:2181,10.165.124.50:2181,10.165.124.51:2181") .retryPolicy(retryPolicy) .sessionTimeoutMs(6000) .connectionTimeoutMs(3000) .build(); client.start(); return client; } }
package com.benmu.framework;

import java.util.HashMap;
import java.util.Map;

/**
 * Initialization configuration holder, assembled through {@link Builder}.
 * <p>
 * Created by Carry on 2017/8/23.
 */
public class BMInitConfig {

    private Map<String, String> mEnvs;

    private String mActice;

    // Instances are only created via Builder#build().
    private BMInitConfig() {
    }

    public Map<String, String> getmEnvs() {
        return mEnvs;
    }

    public void setmEnvs(Map<String, String> mEnvs) {
        this.mEnvs = mEnvs;
    }

    public String getmActice() {
        return mActice;
    }

    public void setmActice(String mActice) {
        this.mActice = mActice;
    }

    /**
     * Fluent builder collecting the custom environment map and the active
     * interceptor flag before producing a {@link BMInitConfig}.
     */
    public static class Builder {

        HashMap<String, String> mCustomerEnv;

        private String mActiveInterceptor;

        public Builder setCustomerEnv(HashMap<String, String> mCustomerEnv) {
            this.mCustomerEnv = mCustomerEnv;
            return this;
        }

        public Builder isActiceInterceptor(String active) {
            this.mActiveInterceptor = active;
            return this;
        }

        public BMInitConfig build() {
            BMInitConfig config = new BMInitConfig();
            config.setmEnvs(this.mCustomerEnv);
            config.setmActice(this.mActiveInterceptor);
            return config;
        }
    }
}
package com.angelorobson.alternativescene.dtos; import com.angelorobson.alternativescene.enums.GenderEnum; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.fasterxml.jackson.datatype.jsr310.deser.LocalDateDeserializer; import com.fasterxml.jackson.datatype.jsr310.ser.LocalDateSerializer; import org.hibernate.validator.constraints.Email; import org.hibernate.validator.constraints.Length; import org.hibernate.validator.constraints.NotEmpty; import org.springframework.format.annotation.DateTimeFormat; import javax.persistence.Column; import javax.persistence.Enumerated; import javax.validation.constraints.NotNull; import java.time.LocalDate; import java.util.Optional; import static javax.persistence.EnumType.STRING; public class UserAppSaveDto { private Optional<Long> id = Optional.empty(); private String name; private String email; private String password; private LocalDate dateBirth; private String imageUrl; private GenderEnum gender; private String googleAccountId; public Optional<Long> getId() { return id; } public void setId(Optional<Long> id) { this.id = id; } @NotEmpty(message = "Name can not be empty.") @Length(min = 3, max = 200, message = "Name must contain between 3 and 200 characters.") public String getName() { return name; } public void setName(String name) { this.name = name; } @NotEmpty(message = "Email can not be empty.") @Length(min = 5, max = 200, message = "Email must contain between 5 and 200 characters.") @Email(message = "Invalid email.") public String getEmail() { return email; } public void setEmail(String email) { this.email = email; } @NotEmpty(message = "Password can not be empty.") public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } @DateTimeFormat(pattern = "yyyy-MM-dd") @JsonSerialize(using = LocalDateSerializer.class) @JsonDeserialize(using = LocalDateDeserializer.class) public 
LocalDate getDateBirth() { return dateBirth; } public void setDateBirth(LocalDate dateBirth) { this.dateBirth = dateBirth; } public String getImageUrl() { return imageUrl; } public void setImageUrl(String imageUrl) { this.imageUrl = imageUrl; } @Column(nullable = false) public String getGoogleAccountId() { return googleAccountId; } public void setGoogleAccountId(String googleAccountId) { this.googleAccountId = googleAccountId; } @Enumerated(STRING) @Column(nullable = false) public GenderEnum getGender() { return gender; } public void setGender(GenderEnum gender) { this.gender = gender; } @Override public String toString() { return "UserAppSaveDto{" + "id=" + id + ", name='" + name + '\'' + ", email='" + email + '\'' + ", password='" + password + '\'' + ", dateBirth=" + dateBirth + '}'; } }
/**
 * boilerpipe
 *
 * Copyright (c) 2009, 2014 Christian Kohlschütter
 *
 * The author licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.maboberlin.boilerpipe.filters.heuristics;

import java.util.List;
import java.util.ListIterator;
import java.util.Set;

import com.github.maboberlin.boilerpipe.BoilerpipeFilter;
import com.github.maboberlin.boilerpipe.BoilerpipeProcessingException;
import com.github.maboberlin.boilerpipe.document.TextBlock;
import com.github.maboberlin.boilerpipe.document.TextDocument;

/**
 * Adds the labels of the preceding block to the current block, optionally adding a prefix.
 */
public final class AddPrecedingLabelsFilter implements BoilerpipeFilter {

  /** Shared instance that copies labels without any prefix. */
  public static final AddPrecedingLabelsFilter INSTANCE = new AddPrecedingLabelsFilter("");
  /** Shared instance that prefixes every copied label with {@code "^"}. */
  public static final AddPrecedingLabelsFilter INSTANCE_PRE = new AddPrecedingLabelsFilter("^");

  private final String labelPrefix;

  /**
   * Creates a new {@link AddPrecedingLabelsFilter} instance.
   *
   * @param labelPrefix the prefix prepended to each label copied from the
   *                    preceding block (may be empty)
   */
  public AddPrecedingLabelsFilter(final String labelPrefix) {
    this.labelPrefix = labelPrefix;
  }

  /**
   * Walks the text blocks from last to first; for each adjacent pair, copies
   * the earlier block's labels (with {@code labelPrefix} prepended) onto the
   * block that follows it.
   *
   * @return {@code true} if any block received at least one copied label
   */
  public boolean process(TextDocument doc) throws BoilerpipeProcessingException {
    List<TextBlock> textBlocks = doc.getTextBlocks();
    if (textBlocks.size() < 2) {
      // fewer than two blocks: no "preceding" relationship exists
      return false;
    }

    boolean changes = false;
    // remaining counts down so the iteration stops before treating the very
    // first block as a recipient (it has no preceding block).
    int remaining = textBlocks.size();

    TextBlock blockBelow = null;
    TextBlock block;
    for (ListIterator<TextBlock> it = textBlocks.listIterator(textBlocks.size()); it.hasPrevious();) {
      if (--remaining <= 0) {
        break;
      }
      if (blockBelow == null) {
        // seed the window with the last block; it only ever receives labels
        blockBelow = it.previous();
        continue;
      }
      block = it.previous();

      Set<String> labels = block.getLabels();
      if (labels != null && !labels.isEmpty()) {
        for (String l : labels) {
          blockBelow.addLabel(labelPrefix + l);
        }
        changes = true;
      }
      blockBelow = block;
    }

    return changes;
  }
}
package on2019_11.on2019_11_07_2019_2020_ICPC__Asia_Jakarta_Regional_Contest__Online_Mirror__ICPC_Rules__Teams_Preferred_.E___Songwriter;

import net.egork.io.InputReader;
import net.egork.io.OutputWriter;

/**
 * Competitive-programming solution stub for problem E ("Songwriter") of the
 * 2019-2020 ICPC Asia Jakarta Regional Contest online mirror.
 */
public class TaskE {
    /**
     * Solves one test case.
     *
     * @param testNumber 1-based index of the test case
     * @param in reader for the problem input
     * @param out writer for the answer
     */
    public void solve(int testNumber, InputReader in, OutputWriter out) {
        // TODO: solution not implemented yet.
    }
}
/*
 Licensed to the Apache Software Foundation (ASF) under one
 or more contributor license agreements.  See the NOTICE file
 distributed with this work for additional information
 regarding copyright ownership.  The ASF licenses this file
 to you under the Apache License, Version 2.0 (the
 "License"); you may not use this file except in compliance
 with the License.  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

package org.apache.flink.connector.base.source.reader.fetcher;

import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
import org.apache.flink.connector.base.source.reader.splitreader.SplitReader;
import org.apache.flink.connector.base.source.reader.splitreader.SplitsChange;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * The internal fetcher runnable responsible for polling message from the external system.
 *
 * <p>The fetcher alternates between running queued {@link SplitFetcherTask}s (e.g. split
 * additions) and the long-running {@link FetchTask} that polls records from the
 * {@link SplitReader}. {@link #wakeUp(boolean)} cooperates with {@link #runOnce()} through
 * the {@code wakeUp} flag (also used as the synchronization monitor) to unblock the fetcher
 * thread without interrupting user code where possible.
 */
public class SplitFetcher<E, SplitT extends SourceSplit> implements Runnable {
    private static final Logger LOG = LoggerFactory.getLogger(SplitFetcher.class);
    // Identifier of this fetcher, used only for logging.
    private final int id;
    // Pending non-fetch tasks; a deque so urgent tasks can be re-enqueued at the front.
    private final BlockingDeque<SplitFetcherTask> taskQueue;
    // track the assigned splits so we can suspend the reader when there is no splits assigned.
    private final Map<String, SplitT> assignedSplits;
    /**
     * Queue of split changes that have been handed to this fetcher but not yet applied to
     * the split reader (drained by the AddSplitsTask).
     */
    private final Queue<SplitsChange<SplitT>> splitChanges;
    // Hand-off queue through which fetched records reach the source reader.
    private final BlockingQueue<RecordsWithSplitIds<E>> elementsQueue;
    // The user-provided reader that actually talks to the external system.
    private final SplitReader<E, SplitT> splitReader;
    // Invoked exactly once when the fetcher thread exits.
    private final Runnable shutdownHook;
    // Wake-up flag; also serves as the monitor guarding runningTask hand-off.
    private final AtomicBoolean wakeUp;
    // Set once by shutdown(); checked by the run loop.
    private final AtomicBoolean closed;
    // The (reusable) record-polling task; created lazily in run().
    private FetchTask<E, SplitT> fetchTask;
    // The thread executing run(); volatile so wakeUp() can interrupt it.
    private volatile Thread runningThread;
    // The task currently being executed, or null between tasks; volatile for wakeUp().
    private volatile SplitFetcherTask runningTask = null;

    SplitFetcher(
        int id,
        BlockingQueue<RecordsWithSplitIds<E>> elementsQueue,
        SplitReader<E, SplitT> splitReader,
        Runnable shutdownHook) {

        this.id = id;
        this.taskQueue = new LinkedBlockingDeque<>();
        this.splitChanges = new LinkedList<>();
        this.elementsQueue = elementsQueue;
        this.assignedSplits = new HashMap<>();
        this.splitReader = splitReader;
        this.shutdownHook = shutdownHook;
        this.wakeUp = new AtomicBoolean(false);
        this.closed = new AtomicBoolean(false);
    }

    @Override
    public void run() {
        LOG.info("Starting split fetcher {}", id);
        try {
            runningThread = Thread.currentThread();
            this.fetchTask = new FetchTask<>(
                splitReader,
                elementsQueue,
                // Remove the split from the assignments if it is already done.
                ids -> ids.forEach(assignedSplits::remove),
                runningThread);
            while (!closed.get()) {
                runOnce();
            }
        } finally {
            // Reset the interrupted flag so the shutdown hook do not got interrupted.
            Thread.interrupted();
            shutdownHook.run();
            LOG.info("Split fetcher {} exited.", id);
        }
    }

    /**
     * Runs a single iteration of the fetch loop: picks either the fetch task (when splits
     * are assigned and no other task is pending) or the next queued task, runs it, and
     * re-enqueues it if it did not finish. Package private method to help unit test.
     */
    void runOnce() {
        try {
            // The fetch task should run if the split assignment is not empty or there is a split change.
            if (shouldRunFetchTask()) {
                runningTask = fetchTask;
            } else {
                runningTask = taskQueue.take();
            }
            // Now the running task is not null. If wakeUp() is called after this point, the
            // fetcher thread will not be interrupted. Instead task.wakeUp() will be called.
            // On the other hand, if the wakeUp() call was made before this point, the wakeUp
            // flag must have already been set, and the fetcher thread may or may not be
            // interrupted, depending on whether the wakeUp() call was before or after the
            // runningTask assignment. So the code does the following:
            // 1. check and clear the interrupt flag on the fetcher thread to avoid
            //    interruption in later code.
            // 2. check the wakeUp flag to avoid unnecessary task run.
            // Note that the runningTask may still encounter the case that the task is waken
            // up before it starts running.
            LOG.debug("Prepare to run {}", runningTask);
            if (!Thread.interrupted() && !wakeUp.get() && runningTask.run()) {
                LOG.debug("Finished running task {}", runningTask);
                // the task has finished running. Set it to null so it won't be enqueued.
                runningTask = null;
            }
        } catch (InterruptedException ie) {
            if (closed.get()) {
                // The fetcher is closed, just return;
                return;
            } else if (wakeUp.get()) {
                // The fetcher thread has just been waken up. So ignore the interrupted
                // exception and continue;
                LOG.debug("Split fetcher has been waken up.");
            } else {
                throw new RuntimeException(String.format(
                    "SplitFetcher thread %d interrupted while polling the records", id), ie);
            }
        }
        // If the task is not null that means this task needs to be re-executed. This only
        // happens when the task is the fetching task or the task was interrupted.
        maybeEnqueueTask(runningTask);
        synchronized (wakeUp) {
            // Set the running task to null. It is necessary for the shutdown method to avoid
            // unnecessarily interrupt the running task.
            runningTask = null;
            // Set the wakeUp flag to false.
            wakeUp.set(false);
        }
    }

    /**
     * Add splits to the split fetcher. This operation is asynchronous.
     *
     * @param splitsToAdd the splits to add.
     */
    public void addSplits(List<SplitT> splitsToAdd) {
        maybeEnqueueTask(new AddSplitsTask<>(splitReader, splitsToAdd, splitChanges, assignedSplits));
        // Wake only the running task (taskOnly = true): the new task is already queued, so
        // there is no need to interrupt the fetcher thread while it waits on the queue.
        wakeUp(true);
    }

    /**
     * Shutdown the split fetcher. Idempotent: only the first call flips the closed flag
     * and wakes the fetcher thread.
     */
    public void shutdown() {
        if (closed.compareAndSet(false, true)) {
            LOG.info("Shutting down split fetcher {}", id);
            wakeUp(false);
        }
    }

    /**
     * Package private for unit test.
     * @return the assigned splits.
     */
    Map<String, SplitT> assignedSplits() {
        return assignedSplits;
    }

    /**
     * Package private for unit test.
     * @return true if there are no pending tasks AND no assigned splits, false otherwise.
     */
    boolean isIdle() {
        return taskQueue.isEmpty() && assignedSplits.isEmpty();
    }

    /**
     * Check whether the fetch task should run. The fetch task should only run when all
     * the following conditions are met.
     * 1. there is no task in the task queue to run.
     * 2. there are assigned splits
     * Package private for testing purpose.
     *
     * @return whether the fetch task should be run.
     */
    boolean shouldRunFetchTask() {
        return taskQueue.isEmpty() && !assignedSplits.isEmpty();
    }

    /**
     * Wake up the fetcher thread. There are only two blocking points in a running fetcher.
     * 1. Taking the next task out of the task queue.
     * 2. Running a task.
     *
     * <p>They need to be waken up differently. If the fetcher is blocking waiting on the
     * next task in the task queue, we should just interrupt the fetcher thread.
     * If the fetcher is running the user split reader, we should call SplitReader.wakeUp()
     * instead of naively interrupt the thread.
     *
     * <p>The correctness can be think of in the following way. The purpose of wake up
     * is to let the fetcher thread go to the very beginning of the running loop.
     * There are three major events in each run of the loop.
     * <ol>
     *     <li>pick a task (blocking)
     *     <li>assign the task to runningTask variable.
     *     <li>run the runningTask. (blocking)
     * </ol>
     * We don't need to worry about things after step 3 because there is no blocking point
     * anymore.
     *
     * <p>We always first set the wakeup flag when waking up the fetcher, then use the
     * value of running task to determine where the fetcher thread is.
     * <ul>
     *     <li>
     *         If runningTask is null, it is before step 2, so we should interrupt fetcher.
     *         This interruption will not be propagated to the split reader, because the
     *         wakeUp flag will prevent the fetchTask from running.
     *     </li>
     *     <li>
     *         If runningTask is not null, it is after step 2. so we should wakeUp the
     *         split reader instead of interrupt the fetcher.
     *     </li>
     * </ul>
     *
     * <p>The above logic only works in the same {@link #runOnce()} invocation. So we need to
     * synchronize to ensure the wake up logic do not touch a different invocation.
     *
     * @param taskOnly when true, only an in-flight task is woken; the fetcher thread is
     *                 never interrupted (used by {@link #addSplits(List)}).
     */
    void wakeUp(boolean taskOnly) {
        // Synchronize to make sure the wake up only work for the current invocation of runOnce().
        synchronized (wakeUp) {
            // Do not wake up repeatedly.
            if (wakeUp.compareAndSet(false, true)) {
                // Now the wakeUp flag is set.
                SplitFetcherTask currentTask = runningTask;
                if (currentTask != null) {
                    // The running task may have missed our wakeUp flag and running, wake it up.
                    LOG.debug("Waking up running task {}", currentTask);
                    currentTask.wakeUp();
                } else if (!taskOnly && runningThread != null) {
                    // The task has not started running yet, and it will not run for this
                    // runOnce() invocation due to the wakeUp flag. But we might have to
                    // interrupt the fetcher thread in case it is blocking on the task queue.
                    LOG.debug("Interrupting fetcher thread.");
                    // Only interrupt when the thread has started and there is no running task.
                    runningThread.interrupt();
                }
            }
        }
    }

    /**
     * Re-enqueues a task at the front of the queue unless it is null, the fetch task
     * (which never needs queueing), or the fetcher is closed.
     */
    private void maybeEnqueueTask(SplitFetcherTask task) {
        // Only enqueue unfinished non-fetch task.
        if (!closed.get() && task != null && task != fetchTask && !taskQueue.offerFirst(task)) {
            throw new RuntimeException(
                "The task queue is full. This is only theoretically possible when really bad thing happens.");
        }
        // NOTE(review): this debug line logs even when nothing was enqueued (null/fetch
        // task/closed) — harmless, but potentially misleading in the logs.
        LOG.debug("Enqueued task {}", task);
    }
}
package org.definitylabs.flue2ent.demo.page.partial;

import org.definitylabs.flue2ent.demo.element.Content;
import org.definitylabs.flue2ent.element.FindElementBy;

import java.util.List;

/**
 * Page mixin exposing the content elements located by the "page-content" class name.
 */
public interface WithContent {

    /** All content elements found on the page. */
    @FindElementBy(className = "page-content")
    List<Content> contents();

    /**
     * Returns the first content element whose title contains the given text,
     * or {@code null} when no element matches.
     */
    default Content content(String text) {
        for (Content candidate : contents()) {
            if (candidate.title().contains(text)) {
                return candidate;
            }
        }
        return null;
    }
}
package com.mursaat.extendedtextview;

import android.content.res.TypedArray;
import android.graphics.Color;
import android.util.AttributeSet;
import android.widget.TextView;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/**
 * Used by TextView.
 *
 * <p>Schedules a {@link GradientRunnable} at a fixed rate to animate a color gradient on a
 * {@link TextView}. {@link #startGradient()} and {@link #stopGradient()} may be called
 * repeatedly; the gradient progress is preserved across restarts.
 */
public class GradientManager {

    private final TextView textView;

    /**
     * The colors used in gradient
     */
    private int[] colors;

    /**
     * The number of colors possibly displayed in a same time
     */
    private int simultaneousColors;

    /**
     * The angle of the gradient
     */
    private int angle;

    /**
     * The time separating the apparition of two colors in millisecond
     */
    private int speed;

    /**
     * How many gradients are calculated by second
     */
    private int maxFPS;

    /**
     * Time interval between each draw (millis)
     */
    private int drawTimeInterval;

    /**
     * Current running gradient runnable
     */
    private GradientRunnable runnable;

    /**
     * Current scheduled gradient future running
     */
    private ScheduledFuture<?> scheduledFuture = null;

    /**
     * Executor backing {@link #scheduledFuture}. Kept so it can be shut down in
     * {@link #stopGradient()} — previously a new executor was created on every start and
     * never shut down, leaking one live thread per start/stop cycle.
     */
    private ScheduledExecutorService scheduledExecutor = null;

    /**
     * The draw-gradient uptime
     */
    private long currentGradientProgress = 0;

    /** Sentinel meaning "attribute absent"; safe because it is no valid attr value. */
    private static final int ATTR_NOT_FOUND = Integer.MIN_VALUE;

    public GradientManager(TextView textView) {
        this.textView = textView;
        this.initDefaultValues();
    }

    public GradientManager(TextView textView, AttributeSet attrs) {
        this.textView = textView;
        this.initFromAttrsValues(attrs);
    }

    /**
     * Initialize the variables of this object
     *
     * @param attrs The attributes of the TextView
     */
    @SuppressWarnings("ResourceType")
    private void initFromAttrsValues(AttributeSet attrs) {
        // Initialize an array containing id of attributes we want to have
        final int[] set = {
                R.attr.colors,
                R.attr.simultaneousColors,
                R.attr.angle,
                R.attr.speed,
                R.attr.maxFPS
        };

        final TypedArray typedArray = textView.getContext().obtainStyledAttributes(attrs, set);

        // Get colors array id
        int colorsArrayId = typedArray.getResourceId(0, ATTR_NOT_FOUND);

        // Get colors, falling back to the library default palette
        if (colorsArrayId != ATTR_NOT_FOUND) {
            colors = textView.getResources().getIntArray(colorsArrayId);
        } else {
            colors = textView.getResources().getIntArray(R.array.default_gradient_colors);
        }

        // Get others attributes
        simultaneousColors = typedArray.getInt(1, ATTR_NOT_FOUND);
        angle = typedArray.getInt(2, ATTR_NOT_FOUND);
        speed = typedArray.getInt(3, ATTR_NOT_FOUND);
        maxFPS = typedArray.getInt(4, ATTR_NOT_FOUND);

        if (simultaneousColors == ATTR_NOT_FOUND) {
            simultaneousColors = 2;
        }
        if (angle == ATTR_NOT_FOUND) {
            angle = 45;
        }
        if (speed == ATTR_NOT_FOUND) {
            speed = 1000;
        }
        // Guard against absent AND invalid (zero/negative) values: ATTR_NOT_FOUND is
        // Integer.MIN_VALUE, and maxFPS <= 0 would make the division below crash or
        // produce a non-positive period.
        if (maxFPS <= 0) {
            maxFPS = 24;
        }

        drawTimeInterval = 1000 / maxFPS;

        typedArray.recycle();
    }

    /**
     * Initialize the variables of this object with default values
     */
    private void initDefaultValues() {
        colors = new int[]{Color.BLUE, Color.RED, Color.GREEN};
        simultaneousColors = 2;
        angle = 45;
        speed = 2000;
        maxFPS = 24;
        drawTimeInterval = 1000 / maxFPS;
    }

    /**
     * Stops the gradient animation, saving the current progress so a later
     * {@link #startGradient()} resumes where it left off. Also shuts down the backing
     * executor so its worker thread does not leak.
     */
    public void stopGradient() {
        synchronized (this) {
            if (scheduledFuture != null) {
                // Save gradient state (future possible restart)
                currentGradientProgress = runnable.getCurrentProgress();
                scheduledFuture.cancel(true);
                runnable = null;
                scheduledFuture = null;
            }
            if (scheduledExecutor != null) {
                // Cancelling the future alone leaves the executor's thread alive;
                // shut the executor down explicitly.
                scheduledExecutor.shutdownNow();
                scheduledExecutor = null;
            }
        }
    }

    /**
     * Create a thread which applies the gradient if not exist
     */
    public void startGradient() {
        synchronized (this) {
            if (scheduledFuture != null) {
                // Already running.
                return;
            }
            final int wf = textView.getWidth();
            final int hf = textView.getHeight();
            // Only start once the view has been laid out (width/height known).
            if (wf > 0 && hf > 0) {
                runnable = new GradientRunnable(textView, colors, simultaneousColors, angle, speed);
                // Apply saved progress if there is
                runnable.setCurrentProgress(currentGradientProgress);
                scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
                scheduledFuture = scheduledExecutor
                        .scheduleAtFixedRate(runnable, 0, drawTimeInterval, TimeUnit.MILLISECONDS);
            }
        }
    }
}
/*
 * Copyright (c) 2012 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.api.client.util;

import com.google.common.io.BaseEncoding;

import java.nio.charset.StandardCharsets;

/**
 * Proxy for base 64 encoding/decoding which matches the Base64 interface in Apache Commons (for
 * historical reasons).
 *
 * <p>Byte/String conversions use UTF-8 explicitly rather than the platform default charset;
 * base64 output is pure ASCII, so this is safe and deterministic on every platform.
 *
 * @author Yaniv Inbar
 * @since 1.8
 */
public class Base64 {

  /**
   * Encodes binary data using the base64 algorithm but does not chunk the output.
   *
   * @param binaryData binary data to encode or {@code null} for {@code null} result
   * @return byte[] containing Base64 characters in their UTF-8 representation or {@code null} for
   *         {@code null} input
   */
  public static byte[] encodeBase64(byte[] binaryData) {
    if (binaryData == null) {
      return null;
    }
    // Explicit charset: base64 characters are ASCII, so UTF-8 yields identical bytes on all
    // platforms (the no-arg getBytes() would depend on the platform default charset).
    return BaseEncoding.base64().encode(binaryData).getBytes(StandardCharsets.UTF_8);
  }

  /**
   * Encodes binary data using the base64 algorithm but does not chunk the output.
   *
   * @param binaryData binary data to encode or {@code null} for {@code null} result
   * @return String containing Base64 characters or {@code null} for {@code null} input
   */
  public static String encodeBase64String(byte[] binaryData) {
    if (binaryData == null) {
      return null;
    }
    return BaseEncoding.base64().encode(binaryData);
  }

  /**
   * Encodes binary data using a URL-safe variation of the base64 algorithm but does not chunk the
   * output. The url-safe variation emits - and _ instead of + and / characters.
   *
   * @param binaryData binary data to encode or {@code null} for {@code null} result
   * @return byte[] containing Base64 characters in their UTF-8 representation or {@code null} for
   *         {@code null} input
   */
  public static byte[] encodeBase64URLSafe(byte[] binaryData) {
    if (binaryData == null) {
      return null;
    }
    return BaseEncoding.base64Url().omitPadding().encode(binaryData).getBytes(StandardCharsets.UTF_8);
  }

  /**
   * Encodes binary data using a URL-safe variation of the base64 algorithm but does not chunk the
   * output. The url-safe variation emits - and _ instead of + and / characters.
   *
   * @param binaryData binary data to encode or {@code null} for {@code null} result
   * @return String containing Base64 characters or {@code null} for {@code null} input
   */
  public static String encodeBase64URLSafeString(byte[] binaryData) {
    if (binaryData == null) {
      return null;
    }
    return BaseEncoding.base64Url().omitPadding().encode(binaryData);
  }

  /**
   * Decodes Base64 data into octets.
   *
   * @param base64Data Byte array containing Base64 data or {@code null} for {@code null} result
   * @return Array containing decoded data or {@code null} for {@code null} input
   */
  public static byte[] decodeBase64(byte[] base64Data) {
    if (base64Data == null) {
      return null;
    }
    // Explicit charset for the byte->String conversion (see class javadoc).
    return decodeBase64(new String(base64Data, StandardCharsets.UTF_8));
  }

  /**
   * Decodes a Base64 String into octets. Accepts both the standard and the URL-safe alphabet:
   * standard decoding is attempted first, then URL-safe decoding as a fallback.
   *
   * @param base64String String containing Base64 data or {@code null} for {@code null} result
   * @return Array containing decoded data or {@code null} for {@code null} input
   */
  public static byte[] decodeBase64(String base64String) {
    if (base64String == null) {
      return null;
    }
    try {
      return BaseEncoding.base64().decode(base64String);
    } catch (IllegalArgumentException e) {
      // Input was not standard base64 (e.g. contains '-' or '_'); retry as URL-safe.
      return BaseEncoding.base64Url().omitPadding().decode(base64String);
    }
  }
}
package com.my.o2o.dto;

import java.util.List;

import com.my.o2o.entity.Shop;
import com.my.o2o.enums.ShopStateEnum;

/**
 * Result wrapper (DTO) for shop operations: carries a status code and message plus either a
 * single shop (for create/update/delete operations) or a shop list (for queries).
 */
public class ShopExecution {

    // Result status code.
    private int state;

    // Human-readable description of the status.
    private String stateInfo;

    // Number of shops (used by list queries).
    private int count;

    // The shop being operated on (used when creating/updating/deleting a shop).
    private Shop shop;

    // Shop list (used when querying a list of shops).
    private List<Shop> shopList;

    public ShopExecution() {
    }

    // Constructor used when a shop operation fails.
    public ShopExecution(ShopStateEnum stateEnum) {
        this.state = stateEnum.getState();
        this.stateInfo = stateEnum.getStateInfo();
    }

    // Constructor used when a shop operation succeeds (single shop result).
    public ShopExecution(ShopStateEnum stateEnum, Shop shop) {
        this.state = stateEnum.getState();
        this.stateInfo = stateEnum.getStateInfo();
        this.shop = shop;
    }

    // Constructor used when a shop operation succeeds (list result).
    public ShopExecution(ShopStateEnum stateEnum, List<Shop> shopList) {
        this.state = stateEnum.getState();
        this.stateInfo = stateEnum.getStateInfo();
        this.shopList = shopList;
    }

    public int getState() {
        return state;
    }

    public void setState(int state) {
        this.state = state;
    }

    public String getStateInfo() {
        return stateInfo;
    }

    public void setStateInfo(String stateInfo) {
        this.stateInfo = stateInfo;
    }

    public int getCount() {
        return count;
    }

    public void setCount(int count) {
        this.count = count;
    }

    public Shop getShop() {
        return shop;
    }

    public void setShop(Shop shop) {
        this.shop = shop;
    }

    public List<Shop> getShopList() {
        return shopList;
    }

    public void setShopList(List<Shop> shopList) {
        this.shopList = shopList;
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.rackspace.cloudbigdata.v1.internal;

import static org.jclouds.openstack.keystone.v2_0.config.KeystoneProperties.CREDENTIAL_TYPE;
import static org.jclouds.openstack.keystone.v2_0.config.KeystoneProperties.SERVICE_TYPE;

import java.util.Properties;

import org.jclouds.openstack.keystone.v2_0.config.CredentialTypes;
import org.jclouds.openstack.v2_0.internal.BaseOpenStackMockTest;
import org.jclouds.rackspace.cloudbigdata.v1.CloudBigDataApi;
import org.jclouds.rackspace.cloudidentity.v2_0.ServiceType;

/**
 * Base class for writing Cloud Big Data Expect tests.
 */
public class BaseCloudBigDataApiMockTest extends BaseOpenStackMockTest<CloudBigDataApi> {

   // Property overrides applied to every mock test derived from this base class.
   protected Properties overrides;

   /**
    * Base Mock Test: selects password-based keystone authentication and the
    * Rackspace big-data service type.
    */
   public BaseCloudBigDataApiMockTest() {
      overrides = buildDefaultOverrides();
   }

   /** Builds the default property overrides shared by all Cloud Big Data mock tests. */
   private static Properties buildDefaultOverrides() {
      Properties props = new Properties();
      props.setProperty(CREDENTIAL_TYPE, CredentialTypes.PASSWORD_CREDENTIALS);
      props.setProperty(SERVICE_TYPE, ServiceType.BIG_DATA);
      return props;
   }
}
package com.xiaoyuzhuanqian.mvp.presenter.gaoetask;

/**
 * Created by qm-171 on 2018/4/8.
 * company qingmo
 */

import com.trello.rxlifecycle2.LifecycleProvider;
import com.trello.rxlifecycle2.android.FragmentEvent;
import com.xiaoyuzhuanqian.api.retrofit.BaseObserver;
import com.xiaoyuzhuanqian.api.retrofit.NewTransformerManager;
import com.xiaoyuzhuanqian.model.ScheduleListBean;
import com.xiaoyuzhuanqian.mvp.contract.GaoeListContract;
import com.xiaoyuzhuanqian.mvp.presenter.BasePresenter;

/**
 * Presenter used by GEScheduleFragment: loads the "gaoe" schedule list from the model and
 * pushes it to the view, with loading indicators tied to the request lifecycle.
 */
public class SchedulePresenter extends BasePresenter<GaoeListContract.Model, GaoeListContract
        .ScheduleFragment> {

    public SchedulePresenter(GaoeListContract.Model mModel, GaoeListContract.ScheduleFragment rootView) {
        super(mModel, rootView);
    }

    /**
     * Fetches the "gaoe" schedule asynchronously; the observable is bound to the fragment
     * lifecycle (disposed at FragmentEvent.DESTROY) so no callback fires after destruction.
     */
    public void getGaoeSchedule() {
        mModel.getGESchedule("gaoe").compose(NewTransformerManager.observableSchedulers((LifecycleProvider) mRootView, FragmentEvent.DESTROY)).subscribe(new BaseObserver<ScheduleListBean>("gaoeschedule") {
            @Override
            protected void onStart() {
                super.onStart();
                // NOTE(review): mRootView is null-checked in onFinish() but not here or in
                // onSuccess() — presumably it cannot be null before the request completes;
                // confirm against BasePresenter's lifecycle.
                mRootView.showLoading();
            }

            @Override
            protected void onSuccess(ScheduleListBean bean) {
                // Only update the UI when the payload actually contains a list.
                if (bean != null && bean.getList() != null) {
                    mRootView.updateUI(bean.getList());
                }
            }

            @Override
            protected void onFinish() {
                super.onFinish();
                if (mRootView != null) {
                    mRootView.hideLoading();
                }
            }
        });
    }
}
package com.example.android.pets.data; import android.app.LoaderManager; import android.content.ContentProvider; import android.content.ContentUris; import android.content.ContentValues; import android.content.UriMatcher; import android.database.Cursor; import android.database.sqlite.SQLiteDatabase; import android.net.Uri; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.util.Log; import static android.R.attr.id; import static android.R.attr.name; /** * Created by HP on 25-06-2017. */ public class PetProvider extends ContentProvider { //DataBase helper object private PetDbHelper mDbHelper; public static final String LOG_TAG = PetProvider.class.getSimpleName(); //Uri matcher code for whole TAble private static final int PETS = 100; //Uri matcher code for a specific row private static final int PETS_ID = 101; //Setup UriMatcher Object private static final UriMatcher sUriMatcher = new UriMatcher(UriMatcher.NO_MATCH); //Setting up the UriMatcher variables static{ sUriMatcher.addURI(PetContract.CONTENT_AUTHORITY,PetContract.PATH_PETS,PETS); sUriMatcher.addURI(PetContract.CONTENT_AUTHORITY,PetContract.PATH_PETS + "/#",PETS_ID); } /* Initialize the provider and DataBase */ @Override public boolean onCreate() { mDbHelper = new PetDbHelper(getContext()); return false; } /* Perform the query for the given Uri Use the given projection, selection, se;ection arguments and sort order */ @Override public Cursor query( Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) { //Get Readable DataBase SQLiteDatabase database = mDbHelper.getReadableDatabase(); //This cursor will hold the result of the Query Cursor cursor = null; //Figure out if the given Uri matches with the coded Integer values int match = sUriMatcher.match(uri); //Performing the specific task depending on the value of match switch (match){ case PETS: //for PETS query the whole table Directly cursor = 
database.query(PetContract.PetEntry.TABLE_NAME,projection,selection,selectionArgs, null,null,sortOrder); break; case PETS_ID: //for specific columns of the table selection = PetContract.PetEntry._ID+"=?"; selectionArgs = new String[] {String.valueOf(ContentUris.parseId(uri))}; cursor = database.query(PetContract.PetEntry.TABLE_NAME,projection,selection,selectionArgs, null,null,sortOrder); break; default: throw new IllegalArgumentException("Cannot query unknown URI"+uri); } //Set notification uri on the cursor //so we know what content uri the cursor was created for //If the data at this uri changes then we knoe that we need to reload the cursor cursor.setNotificationUri(getContext().getContentResolver(), uri); return cursor; } /* Inserts new data into the provider with the given Content values */ @Override public Uri insert( Uri uri, ContentValues contentValues) { final int match = sUriMatcher.match(uri); switch (match){ case PETS: return insertPet(uri, contentValues); default: throw new IllegalArgumentException("Insertion is not supported for "+uri); } } /* insertPet() to insert a particular column */ private Uri insertPet(Uri uri, ContentValues values) { //Check if the name is not null String name = values.getAsString(PetContract.PetEntry.COLUMN_PET_NAME); if (name == null) throw new IllegalArgumentException("Pet requires a Name"); //Check if weight is not negetive Integer weight = values.getAsInteger(PetContract.PetEntry.COLUMN_PET_WEIGHT); if (weight < 0 && weight != null) throw new IllegalArgumentException("The weight should be positive"); //Check if gender is not null Integer gender = values.getAsInteger(PetContract.PetEntry.COLUMN_PET_GENDER); if ((gender == null) || !(PetContract.PetEntry.isValidGender(gender))) throw new IllegalArgumentException("The gender is reqiured to be entered"); //Get a writable DataBase SQLiteDatabase database = mDbHelper.getWritableDatabase(); //Insert the database long id = database.insert(PetContract.PetEntry.TABLE_NAME, null, 
values); if (id == -1){ Log.e(LOG_TAG, "Failed to Insert the Data " + uri); return null; } //Notify all the listeners that the data has changed getContext().getContentResolver().notifyChange(uri, null); return ContentUris.withAppendedId(uri,id); } /* Delete the Data at the given selection and selection arguments */ @Override public int delete( Uri uri,String selection, String[] selectionArgs) { SQLiteDatabase database = mDbHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); //Perform the delete on the database and get the number of rows affected int rowsDeleted = database.delete(PetContract.PetEntry.TABLE_NAME,selection,selectionArgs); //if one or more rows have been updated then notify all the Listeners that the uri has changed if(rowsDeleted != 0) getContext().getContentResolver().notifyChange(uri, null); switch(match){ case PETS: //Delete all the rows that match the selection and selectionArgs return database.delete(PetContract.PetEntry.TABLE_NAME,selection,selectionArgs); case PETS_ID: //Delete a single row by the ID given in the URI selection = PetContract.PetEntry._ID + "=?"; selectionArgs = new String[] {String.valueOf(ContentUris.parseId(uri))}; return database.delete(PetContract.PetEntry.TABLE_NAME,selection,selectionArgs); default: throw new IllegalArgumentException("Deletion nt Supported for"+uri); } } /* Updates the data at the given selection and selection arguments with the given content values */ @Override public int update( Uri uri, ContentValues contentValues, String selection,String[] selectionArgs) { final int match = sUriMatcher.match(uri); switch (match) { case PETS: return updatePets(uri, contentValues, selection, selectionArgs); case PETS_ID: //For the PETS_ID case code, extract out the id from the uri //So we know which row to update. selection will be "id=?" and //selection argmuments will be a String array connecting the actual ID. 
selection = PetContract.PetEntry._ID + "=?"; selectionArgs = new String[]{String.valueOf(ContentUris.parseId(uri))}; return updatePets(uri, contentValues, selection, selectionArgs); default: throw new IllegalArgumentException("Update not supported for "+uri ); } } private int updatePets(Uri uri, ContentValues contentValues, String selection, String[] selectionArgs) { //Get Readable DataBase SQLiteDatabase database = mDbHelper.getReadableDatabase(); //This cursor will hold the result of the Query Cursor cursor = null; //Figure out if the given Uri matches with the coded Integer values int match = sUriMatcher.match(uri); //Sanity check for valid name if(contentValues.containsKey(PetContract.PetEntry.COLUMN_PET_NAME)) { String name = contentValues.getAsString(PetContract.PetEntry.COLUMN_PET_NAME); if (name == null) throw new IllegalArgumentException("Enter a Pet Name"); } //Sanity Check for valid weight if(contentValues.containsKey(PetContract.PetEntry.COLUMN_PET_WEIGHT)) { Integer weight = contentValues.getAsInteger(PetContract.PetEntry.COLUMN_PET_WEIGHT); if (weight != null && weight<0) throw new IllegalArgumentException("Enter a Pet Weight"); } //Sanity Check for valid gender if(contentValues.containsKey(PetContract.PetEntry.COLUMN_PET_GENDER)) { Integer gender = contentValues.getAsInteger(PetContract.PetEntry.COLUMN_PET_GENDER); if((gender == null)||!(PetContract.PetEntry.isValidGender(gender))) throw new IllegalArgumentException("Enter a Pet Gender"); } //if there is new values to update then dont update if(contentValues.size()==0) return 0; //Get the database to writing mode database = mDbHelper.getWritableDatabase(); //Perform the update on the database and get the number of rows affected int rowsUpdated = database.update(PetContract.PetEntry.TABLE_NAME,contentValues,selection,selectionArgs); //if one or more rows have been updated then notify all the Listeners that the uri has changed if(rowsUpdated != 0) getContext().getContentResolver().notifyChange(uri, 
null); return database.update(PetContract.PetEntry.TABLE_NAME,contentValues,selection,selectionArgs); } /* Return the MIME type of the data for the content URI */ @Override public String getType( Uri uri) { final int match = sUriMatcher.match(uri); switch (match){ case PETS: return PetContract.PetEntry.CONTENT_LIST_TYPE; case PETS_ID: return PetContract.PetEntry.CONTENT_ITEM_TYPE; default: throw new IllegalArgumentException("Unknown uri " + uri + "with match " +match); } } }
package com.test.edualitytest.logic;

import java.util.ArrayList;

/**
 * Aggregate (iterator-pattern collection) of {@link ContentLogic} items, iterated via
 * {@link ContentIterator}.
 */
public class ContentAggregate extends AggregateBase {

    // Backing store for the aggregated content items.
    private final ArrayList<ContentLogic> items = new ArrayList<>();

    /** Creates an iterator positioned over this aggregate. */
    public ContentIterator createIterator() {
        return new ContentIterator(this);
    }

    /** Number of items currently held. */
    public int count() {
        return items.size();
    }

    /** Item at the given zero-based position. */
    public ContentLogic obtain(int position) {
        return items.get(position);
    }

    /**
     * Intentionally a no-op in this implementation.
     * NOTE(review): items appear to be added to the backing list elsewhere rather than
     * through this override — confirm against callers of addItem().
     */
    @Override
    public void addItem(Object item) {
    }
}
package com.suming.plugin.constants;

/**
 * Constant names matching JavaScript {@code Array.prototype} method names.
 * NOTE(review): presumably these are matched by name (via {@code name()}/{@code valueOf})
 * against identifiers in source text — confirm against the plugin code that uses this enum.
 * Do not rename constants or rely on {@code ordinal()}.
 */
public enum ArrayFunctions {
  concat,
  every,
  filter,
  find,
  findIndex,
  flat,
  flatMap,
  forEach,
  includes,
  join,
  map,
  reduce,
  reverse,
  slice,
  some,
  sort,
  splice,
  unshift,
}
package ch.ethz.ssh2.crypto.digest;

/**
 * SHA-1 implementation based on FIPS PUB 180-1.
 *
 * (http://www.itl.nist.gov/fipspubs/fip180-1.htm)
 *
 * Streaming implementation: callers feed bytes via {@code update(...)} and
 * finish with {@code digest(...)}, which also resets the instance for reuse.
 * Not thread-safe: each instance keeps mutable buffering state.
 *
 * @author Christian Plattner, plattner@inf.ethz.ch
 * @version $Id: SHA1.java,v 1.4 2006/02/02 09:11:03 cplattne Exp $
 */
public final class SHA1 implements Digest
{
    // The five 32-bit chaining variables (a..e in FIPS 180-1).
    private int H0, H1, H2, H3, H4;

    // Current 512-bit (64-byte) message block being accumulated.
    private final byte msg[] = new byte[64];

    // Expanded 80-word message schedule, reused across blocks.
    private final int[] w = new int[80];

    // Write position within msg (0..63).
    private int currentPos;
    // Total message length so far, in BITS (needed for the length padding).
    private long currentLen;

    public SHA1()
    {
        reset();
    }

    /** @return the digest size in bytes (20 for SHA-1) */
    public final int getDigestLength()
    {
        return 20;
    }

    /** Re-initializes the chaining variables to the FIPS 180-1 constants. */
    public final void reset()
    {
        H0 = 0x67452301;
        H1 = 0xEFCDAB89;
        H2 = 0x98BADCFE;
        H3 = 0x10325476;
        H4 = 0xC3D2E1F0;

        currentPos = 0;
        currentLen = 0;
    }

    /** Feeds {@code len} bytes of {@code b}, starting at {@code off}. */
    public final void update(byte b[], int off, int len)
    {
        for (int i = off; i < (off + len); i++)
            update(b[i]);
    }

    /** Feeds all bytes of {@code b}. */
    public final void update(byte b[])
    {
        for (int i = 0; i < b.length; i++)
            update(b[i]);
    }

    /** Feeds a single byte; compresses whenever a full 64-byte block is ready. */
    public final void update(byte b)
    {
        // System.out.println(pos + "->" + b);
        msg[currentPos++] = b;
        currentLen += 8;
        if (currentPos == 64)
        {
            perform();
            currentPos = 0;
        }
    }

    /** Upper-case hex rendering of a byte array (used by the self-test only). */
    private static final String toHexString(byte[] b)
    {
        final String hexChar = "0123456789ABCDEF";

        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < b.length; i++)
        {
            sb.append(hexChar.charAt((b[i] >> 4) & 0x0f));
            sb.append(hexChar.charAt(b[i] & 0x0f));
        }
        return sb.toString();
    }

    /** Writes {@code val} big-endian into {@code b} at {@code pos}. */
    private final void putInt(byte[] b, int pos, int val)
    {
        b[pos] = (byte) (val >> 24);
        b[pos + 1] = (byte) (val >> 16);
        b[pos + 2] = (byte) (val >> 8);
        b[pos + 3] = (byte) val;
    }

    /** Finalizes the hash into {@code out} starting at offset 0. */
    public final void digest(byte[] out)
    {
        digest(out, 0);
    }

    /**
     * Finalizes the hash: appends the 0x80 pad byte, zero-pads to byte 56 of
     * the block, appends the 64-bit bit-length, writes the 20-byte digest into
     * {@code out} at {@code off}, then resets this instance for reuse.
     */
    public final void digest(byte[] out, int off)
    {
        long l = currentLen;

        update((byte) 0x80);

        // padding could be done more efficiently...
        while (currentPos != 56)
            update((byte) 0);

        // Append the original message length in bits, big-endian.
        update((byte) (l >> 56));
        update((byte) (l >> 48));
        update((byte) (l >> 40));
        update((byte) (l >> 32));
        update((byte) (l >> 24));
        update((byte) (l >> 16));
        update((byte) (l >> 8));
        update((byte) (l));

        // debug(80, H0, H1, H2, H3, H4);

        putInt(out, off, H0);
        putInt(out, off + 4, H1);
        putInt(out, off + 8, H2);
        putInt(out, off + 12, H3);
        putInt(out, off + 16, H4);

        reset();
    }

    /*
     * private void debug(int t, int A, int B, int C, int D, int E) {
     * System.out.println(t + ": " + Integer.toHexString(A).toUpperCase() + ", " +
     * Integer.toHexString(B).toUpperCase() + ", " +
     * Integer.toHexString(C).toUpperCase() + "," +
     * Integer.toHexString(D).toUpperCase() + ", " +
     * Integer.toHexString(E).toUpperCase()); }
     */

    /**
     * The SHA-1 compression function: consumes the 64 bytes in {@code msg},
     * expands them into the 80-word schedule {@code w}, runs the four rounds
     * of 20 steps each, and folds the result into H0..H4.
     */
    private final void perform()
    {
        // Load the block as sixteen big-endian 32-bit words.
        for (int i = 0; i < 16; i++)
            w[i] = ((msg[i * 4] & 0xff) << 24) | ((msg[i * 4 + 1] & 0xff) << 16) | ((msg[i * 4 + 2] & 0xff) << 8)
                    | ((msg[i * 4 + 3] & 0xff));

        // Expand to 80 words: w[t] = ROTL1(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16]).
        for (int t = 16; t < 80; t++)
        {
            int x = w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16];
            w[t] = ((x << 1) | (x >>> 31));
        }

        int A = H0;
        int B = H1;
        int C = H2;
        int D = H3;
        int E = H4;

        int T;

        // Round 1 (t=0..19): f = Ch(B,C,D), K = 0x5A827999.
        for (int t = 0; t <= 19; t++)
        {
            T = ((A << 5) | (A >>> 27)) + ((B & C) | ((~B) & D)) + E + w[t] + 0x5A827999;
            E = D;
            D = C;
            C = ((B << 30) | (B >>> 2));
            B = A;
            A = T;
            // debug(t, A, B, C, D, E);
        }

        // Round 2 (t=20..39): f = Parity(B,C,D), K = 0x6ED9EBA1.
        for (int t = 20; t <= 39; t++)
        {
            T = ((A << 5) | (A >>> 27)) + (B ^ C ^ D) + E + w[t] + 0x6ED9EBA1;
            E = D;
            D = C;
            C = ((B << 30) | (B >>> 2));
            B = A;
            A = T;
            // debug(t, A, B, C, D, E);
        }

        // Round 3 (t=40..59): f = Maj(B,C,D), K = 0x8F1BBCDC.
        for (int t = 40; t <= 59; t++)
        {
            T = ((A << 5) | (A >>> 27)) + ((B & C) | (B & D) | (C & D)) + E + w[t] + 0x8F1BBCDC;
            E = D;
            D = C;
            C = ((B << 30) | (B >>> 2));
            B = A;
            A = T;
            // debug(t, A, B, C, D, E);
        }

        // Round 4 (t=60..79): f = Parity(B,C,D), K = 0xCA62C1D6.
        for (int t = 60; t <= 79; t++)
        {
            T = ((A << 5) | (A >>> 27)) + (B ^ C ^ D) + E + w[t] + 0xCA62C1D6;
            E = D;
            D = C;
            C = ((B << 30) | (B >>> 2));
            B = A;
            A = T;
            // debug(t, A, B, C, D, E);
        }

        H0 = H0 + A;
        H1 = H1 + B;
        H2 = H2 + C;
        H3 = H3 + D;
        H4 = H4 + E;

        // debug(80, H0, H1, H2, H3, H4);
    }

    /** Self-test against the three FIPS 180-1 reference vectors. */
    public static void main(String[] args)
    {
        SHA1 sha = new SHA1();

        byte[] dig1 = new byte[20];
        byte[] dig2 = new byte[20];
        byte[] dig3 = new byte[20];

        /*
         * We do not specify a charset name for getBytes(), since we assume that
         * the JVM's default encoder maps the _used_ ASCII characters exactly as
         * getBytes("US-ASCII") would do. (Ah, yes, too lazy to catch the
         * exception that can be thrown by getBytes("US-ASCII")). Note: This has
         * no effect on the SHA-1 implementation, this is just for the following
         * test code.
         */

        sha.update("abc".getBytes());
        sha.digest(dig1);

        sha.update("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq".getBytes());
        sha.digest(dig2);

        for (int i = 0; i < 1000000; i++)
            sha.update((byte) 'a');
        sha.digest(dig3);

        String dig1_res = toHexString(dig1);
        String dig2_res = toHexString(dig2);
        String dig3_res = toHexString(dig3);

        String dig1_ref = "A9993E364706816ABA3E25717850C26C9CD0D89D";
        String dig2_ref = "84983E441C3BD26EBAAE4AA1F95129E5E54670F1";
        String dig3_ref = "34AA973CD4C4DAA4F61EEB2BDBAD27316534016F";

        if (dig1_res.equals(dig1_ref))
            System.out.println("SHA-1 Test 1 OK.");
        else
            System.out.println("SHA-1 Test 1 FAILED.");

        if (dig2_res.equals(dig2_ref))
            System.out.println("SHA-1 Test 2 OK.");
        else
            System.out.println("SHA-1 Test 2 FAILED.");

        if (dig3_res.equals(dig3_ref))
            System.out.println("SHA-1 Test 3 OK.");
        else
            System.out.println("SHA-1 Test 3 FAILED.");
    }
}
/**
 * Copyright © 2013-2021 The OpenNTF Domino API Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 		http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.openntf.domino.xsp.xots;

import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

import javax.script.ScriptContext;
import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;

import org.openntf.domino.Database;
import org.openntf.domino.design.AnyFileResource;
import org.openntf.domino.design.DatabaseDesign;
import org.openntf.domino.thread.AbstractDominoRunnable;
import org.openntf.domino.utils.Factory;
import org.openntf.domino.utils.Factory.SessionType;

/**
 * Runnable tasklet that loads a script stored as a file resource in a database
 * design and executes it through the JSR-223 {@link ScriptEngine} matching the
 * script's file extension. The database and its session are exposed to the
 * script as the bindings {@code database} and {@code session}.
 */
public class JSR223Tasklet extends AbstractDominoRunnable {
    private static final long serialVersionUID = 1L;

    // Script source text, captured at construction time.
    private final String script_;
    // File extension of the script, used to pick the engine (e.g. "js").
    private final String scriptExt_;
    // API path of the database; re-resolved inside run() on the worker thread.
    private final String databasePath_;

    /**
     * Loads the named file resource from the database design.
     *
     * @param scriptName name of the file resource, including its extension
     * @param database   database whose design contains the script
     * @throws IllegalArgumentException if no such file resource exists
     */
    public JSR223Tasklet(final String scriptName, final Database database) {
        int extIndex = scriptName.lastIndexOf('.');
        scriptExt_ = scriptName.substring(extIndex + 1);
        DatabaseDesign design = database.getDesign();
        AnyFileResource script = design.getAnyFileResource(scriptName);
        if (script == null) {
            // Fail fast at construction instead of NPE-ing later in run()
            throw new IllegalArgumentException("No file resource named '" + scriptName + "' found in " + database.getApiPath());
        }
        // Decode explicitly as UTF-8 rather than the platform default charset.
        // NOTE(review): assumes file resources are stored UTF-8 - confirm.
        script_ = new String(script.getFileData(), StandardCharsets.UTF_8);
        databasePath_ = database.getApiPath();
    }

    @Override
    public void run() {
        Database database = Factory.getSession(SessionType.CURRENT).getDatabase(databasePath_);

        ScriptEngineManager manager = new ScriptEngineManager();
        ScriptEngine engine = manager.getEngineByExtension(scriptExt_);
        if (engine == null) {
            // Previously this fell through to a NullPointerException on engine.put(...)
            throw new IllegalStateException("No JSR-223 script engine registered for extension '" + scriptExt_ + "'");
        }
        engine.put("database", database); //$NON-NLS-1$
        engine.put("session", database.getAncestorSession()); //$NON-NLS-1$

        // Route script output/errors to the server console.
        ScriptContext context = engine.getContext();
        context.setWriter(new PrintWriter(System.out));
        context.setErrorWriter(new PrintWriter(System.err));

        try {
            engine.eval(script_);
        } catch (ScriptException e) {
            throw new RuntimeException(e);
        }
    }
}
package net.thucydides.core.statistics.service;

import net.thucydides.core.model.TestOutcome;
import net.thucydides.core.model.TestTag;

import java.util.Set;

/**
 * Strategy interface for deriving tags from a test outcome.
 * Implementations contribute the tags used to categorize test results.
 */
public interface TagProvider {

    /**
     * Returns the tags associated with a given test outcome.
     *
     * @param testOutcome the test outcome to derive tags from
     * @return the set of tags applying to this outcome
     */
    Set<TestTag> getTagsFor(final TestOutcome testOutcome);
}
/* 464 [The "BSD license"] Copyright (c) 2011-2013 Joel Li (李家智) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
package org.beetl.ext.spring;

import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.beetl.core.GroupTemplate;
import org.beetl.core.Template;
import org.beetl.ext.web.WebRender;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.NoUniqueBeanDefinitionException;
import org.springframework.web.servlet.view.AbstractTemplateView;

/**
 * Spring MVC view backed by a Beetl template.
 *
 * @author Chen Rui
 */
public class BeetlSpringView extends AbstractTemplateView {

    /* ----- ----- ----- ----- properties ----- ----- ----- ----- */

    /**
     * The Beetl GroupTemplate used by this view, injected by the ViewResolver.
     * If not set, the single GroupTemplate bean from the application context is used.
     */
    protected GroupTemplate groupTemplate = null;

    /**
     * Sets the Beetl GroupTemplate used by this view, normally injected by the
     * ViewResolver. If not set, the single GroupTemplate bean from the
     * application context is used.
     *
     * @param groupTemplate
     *            the GroupTemplate to render with
     */
    public void setGroupTemplate(GroupTemplate groupTemplate) {
        this.groupTemplate = groupTemplate;
    }

    public GroupTemplate getGroupTemplate() {
        return groupTemplate;
    }

    /* ----- ----- ----- ----- constructors ----- ----- ----- ----- */

    /**
     * Default constructor.
     */
    public BeetlSpringView() {
    }

    /* ----- ----- ----- ----- implementation ----- ----- ----- ----- */

    /**
     * Renders this view with the given model.
     *
     * @param model the model attributes, bound into the template one by one
     * @param request
     * @param response
     * @throws NoSuchBeanDefinitionException
     *             if no GroupTemplate was set and the Spring context has no
     *             GroupTemplate bean
     * @throws NoUniqueBeanDefinitionException
     *             if no GroupTemplate was set and the Spring context has more
     *             than one GroupTemplate bean
     */
    @Override
    protected void renderMergedTemplateModel(Map<String, Object> model, HttpServletRequest request,
            HttpServletResponse response) throws NoSuchBeanDefinitionException, NoUniqueBeanDefinitionException {
        // If no groupTemplate was injected, resolve the single bean from the context.
        // NOTE(review): this lazy assignment to a shared field is not synchronized;
        // concurrent first renders may both call getBean - confirm this is acceptable.
        if (groupTemplate == null) {
            groupTemplate = getApplicationContext().getBean(GroupTemplate.class);
        }
        // Renderer that binds every model entry into the template before rendering.
        WebRender render = new WebRender(groupTemplate) {
            @Override
            protected void modifyTemplate(Template template, String key, HttpServletRequest request,
                    HttpServletResponse response, Object... args) {
                Map<?, ?> model = (Map<?, ?>) args[0];
                for (Entry<?, ?> entry : model.entrySet()) {
                    String name = (String) entry.getKey();
                    Object value = entry.getValue();
                    template.binding(name, value);
                }
            }
        };
        String path = getUrl();
        render.render(path, request, response, model);
    }

    @Override
    public boolean checkResource(Locale locale) throws Exception {
        // BeetlGroupUtilConfiguration config = getApplicationContext().getBean(BeetlGroupUtilConfiguration.class);
        String url = getUrl();
        // Strip the ajax fragment part ("view#fragment") before checking existence.
        if (url.contains("#")) {
            String[] split = url.split("#");
            if (split.length > 2) {
                // Error message (kept verbatim): "invalid view name: <url>"
                throw new Exception("视图名称有误:" + url);
            }
            return groupTemplate.getResourceLoader().exist(split[0]);
        }else{
            return groupTemplate.getResourceLoader().exist(url);
        }
    }
}
package mysticalmechanics.api.lubricant;

import net.minecraft.nbt.NBTTagCompound;

import java.util.Collection;

/**
 * Capability for things that can hold lubricant. Implementations track the
 * applied {@link LubricantStack}s; the default methods combine each stack's
 * modifier into a single multiplicative factor (1.0 when nothing is applied).
 */
public interface ILubricantCapability {
    int lubricate(ILubricant lubricant, int amount, boolean simulate);

    Collection<LubricantStack> getAppliedLubricant();

    int getCapacity();

    /** Product of the speed modifiers of every applied lubricant. */
    default double getSpeedMod() {
        return getAppliedLubricant().stream()
                .mapToDouble(applied -> applied.getLubricant().getSpeedMod())
                .reduce(1, (acc, mod) -> acc * mod);
    }

    /** Product of the friction modifiers of every applied lubricant. */
    default double getFrictionMod() {
        return getAppliedLubricant().stream()
                .mapToDouble(applied -> applied.getLubricant().getFrictionMod())
                .reduce(1, (acc, mod) -> acc * mod);
    }

    /** Product of the heat modifiers of every applied lubricant. */
    default double getHeatMod() {
        return getAppliedLubricant().stream()
                .mapToDouble(applied -> applied.getLubricant().getHeatMod())
                .reduce(1, (acc, mod) -> acc * mod);
    }

    /** Serialization hook; the default implementation stores nothing. */
    default NBTTagCompound writeToNBT(NBTTagCompound tag) {
        return tag;
    }

    /** Deserialization hook; the default implementation reads nothing. */
    default void readFromNBT(NBTTagCompound tag) {
    }
}
/*************************************************************************************
 * Copyright (c) 2013-2018 Red Hat, Inc. and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v2.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v20.html
 *
 * Contributors:
 *     JBoss by Red Hat - Initial implementation.
 ************************************************************************************/
package org.jboss.tools.rsp.stacks.core.model;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.jboss.jdf.stacks.client.DefaultStacksClientConfiguration;
import org.jboss.jdf.stacks.client.StacksClient;
import org.jboss.jdf.stacks.client.StacksClientConfiguration;
import org.jboss.jdf.stacks.client.messages.StacksMessages;
import org.jboss.jdf.stacks.model.Stacks;
import org.jboss.jdf.stacks.parser.Parser;
import org.jboss.tools.rsp.eclipse.core.runtime.CoreException;
import org.jboss.tools.rsp.eclipse.core.runtime.IProgressMonitor;
import org.jboss.tools.rsp.eclipse.core.runtime.Path;
import org.jboss.tools.rsp.eclipse.core.runtime.SubProgressMonitor;
import org.jboss.tools.rsp.foundation.core.transport.URLTransportCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A StacksManager is in charge of retrieving a file from a URL or standard
 * location and returning a jdf.stacks model object generated via the stacks
 * client.
 */
public class StacksManager {

    // Timeout (ms) for fetching stacks through the fallback StacksClient.
    private static final int DOWNLOAD_STACKS_TIMEOUT = 5000;
    private static final Logger LOG = LoggerFactory.getLogger(StacksManager.class);

    @Deprecated
    private static final String STACKS_URL_PROPERTY = "org.jboss.examples.stacks.url";
    private static final String URL_PROPERTY_STACKS = "org.jboss.tools.stacks.url_stacks";
    private static final String URL_PROPERTY_PRESTACKS = "org.jboss.tools.stacks.url_prestacks";

    private static final String STACKS_URL;
    private static final String PRESTACKS_URL;

    // Declare the types of stacks available for fetch
    public enum StacksType {
        STACKS_TYPE, PRESTACKS_TYPE
    }

    // Load the default stacks url and prestacks url from a sysprop or jar
    static {
        STACKS_URL = System.getProperty(URL_PROPERTY_STACKS, System.getProperty(STACKS_URL_PROPERTY,
                System.getProperty(StacksClientConfiguration.REPO_PROPERTY, getStacksDefaultUrlFromJar())));
        PRESTACKS_URL = System.getProperty(URL_PROPERTY_PRESTACKS,
                System.getProperty("jdf.prestacks.client.repo", getPreStacksDefaultUrlFromJar()));
    }

    // Folder where downloaded files are cached; may be null (temp dir is used).
    private File dataFolder;

    @Deprecated
    private StacksManager() {
        super();
    }

    public StacksManager(File dataFolder) {
        super();
        this.dataFolder = dataFolder;
    }

    /**
     * Fetch the default stacks model.
     *
     * @param monitor progress monitor
     * @return the stacks model, or null if it could not be fetched
     */
    public Stacks getStacks(IProgressMonitor monitor) {
        Stacks[] all = getStacks("Fetching JBoss Stacks", monitor, StacksType.STACKS_TYPE);
        if (all != null && all.length > 0)
            return all[0];
        return null;
    }

    /**
     * Fetch an array of stacks models where each element represents one of the
     * StacksType urls.
     *
     * @param jobName display name for the download job
     * @param monitor progress monitor
     * @param types   the stack types to fetch
     * @return one model per successfully fetched type (never null)
     */
    public Stacks[] getStacks(String jobName, IProgressMonitor monitor, StacksType... types) {
        if (types == null)
            return new Stacks[0];
        LOG.trace("Request received for {} stacks types.", types.length);
        List<Stacks> ret = new ArrayList<>(types.length);
        monitor.beginTask(jobName, types.length * 100);
        for (int i = 0; i < types.length; i++) {
            switch (types[i]) {
            case STACKS_TYPE:
                LOG.trace("Loading Stacks Model from {}", STACKS_URL);
                Stacks s = getStacks(STACKS_URL, jobName, new SubProgressMonitor(monitor, 50));
                if (s == null && !monitor.isCanceled()) {
                    // Fall back to the stacks-client's own download mechanism.
                    LOG.warn("Stacks from {} can not be read, using client mechanism instead", STACKS_URL);
                    s = getDefaultStacksFromClient(new SubProgressMonitor(monitor, 50));
                }
                if (s != null)
                    ret.add(s);
                break;
            case PRESTACKS_TYPE:
                // Pre-stacks has no fall-back mechanism at this time
                LOG.trace("Loading Stacks Model from {}", PRESTACKS_URL);
                Stacks s2 = getStacks(PRESTACKS_URL, jobName, new SubProgressMonitor(monitor, 100));
                if (s2 != null)
                    ret.add(s2);
                break;
            default:
                break;
            }
        }
        monitor.done();
        return ret.toArray(new Stacks[ret.size()]);
    }

    /**
     * Fetch the stacks model representing a given arbitrary url. The remote file
     * will be cached only until the system exits.
     *
     * @param url the url to fetch
     * @param monitor progress monitor
     * @return the stacks model, or null on failure
     */
    public Stacks getStacks(String url, IProgressMonitor monitor) {
        return getStacksFromURL(url, url, monitor);
    }

    /**
     * Fetch the stacks model for a given url. Cache the remote file with a duration
     * representing forever, or, until the remote file is newer.
     *
     * @param url     The url
     * @param jobName Job name for display purposes
     * @param monitor progress monitor
     * @return the stacks model, or null on failure
     */
    public Stacks getStacks(String url, String jobName, IProgressMonitor monitor) {
        return getStacksFromURL(url, jobName, monitor);
    }

    /** Downloads (or reuses the cached copy of) the url and parses it. */
    protected Stacks getStacksFromURL(String url, String jobName, IProgressMonitor monitor) {
        Stacks stacks = null;
        try {
            LOG.trace("Locating or downloading file for {}", url);
            File f = getCachedFileForURL(url, jobName, monitor);
            return getStacksFromFile(f);
        } catch (Exception e) {
            LOG.error("Can't access or parse " + url, e); //$NON-NLS-1$
        }
        return stacks;
    }

    /** Parses a local stacks yaml file; returns null if the file is missing. */
    protected Stacks getStacksFromFile(File f) throws IOException {
        if (f != null && f.exists()) {
            LOG.trace("Local file for url exists");
            try (FileInputStream fis = new FileInputStream(f)) {
                Parser p = new Parser();
                return p.parse(fis);
            }
        }
        return null;
    }

    /** Fallback: let the stacks-client download the default stacks, bounded by a timeout. */
    private Stacks getDefaultStacksFromClient(IProgressMonitor monitor) {
        if (!monitor.isCanceled()) {
            final StacksClient client = new StacksClient(new DefaultStacksClientConfiguration(), new JBTStacksMessages());
            return runWithTimeout(DOWNLOAD_STACKS_TIMEOUT, client::getStacks);
        }
        return null;
    }

    /**
     * Runs the callable on a single-thread executor and waits at most
     * {@code millisTimeout} ms. Returns null on timeout or failure.
     */
    public static <R> R runWithTimeout(long millisTimeout, Callable<R> callable) {
        ExecutorService singleThreadExecutor = Executors.newFixedThreadPool(1);
        Future<R> future = singleThreadExecutor.submit(callable);
        try {
            return future.get(millisTimeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // BUG FIX: was silently swallowed; restore the interrupt flag.
            Thread.currentThread().interrupt();
            LOG.trace("Interrupted while waiting for stacks download", e);
        } catch (ExecutionException | TimeoutException e) {
            // Best-effort semantics: callers treat a null result as "not available".
            LOG.trace("Stacks download did not complete", e);
        } finally {
            singleThreadExecutor.shutdown();
        }
        return null;
    }

    /**
     * Fetch a local cache of the remote file. If the remote file is newer than the
     * local, update it.
     *
     * @param url     A url to fetch the stacks model from
     * @param jobName A job name passed into the downloader for display purposes
     * @param monitor progress monitor
     * @return a file containing the cached model, or null if no cache is available
     */
    protected File getCachedFileForURL(String url, String jobName, IProgressMonitor monitor) throws CoreException {
        URLTransportCache c = getCache();
        if (c == null)
            return null;
        if (c.isCacheOutdated(url, monitor)) {
            return c.downloadAndCache(url, jobName, 10000, true, monitor);
        } else {
            // Else use the local cache
            return c.getCachedFile(url);
        }
    }

    private URLTransportCache cache;

    /** Lazily creates the transport cache, under dataFolder or a temp directory. */
    private URLTransportCache getCache() {
        if (cache == null) {
            if (dataFolder != null) {
                File stacks = new File(dataFolder, "stacks");
                cache = URLTransportCache.getCache(new Path(stacks.getAbsolutePath()));
            } else {
                try {
                    File tmpDir = Files.createTempDirectory("rsp-stacks").toFile();
                    tmpDir.mkdirs();
                    // BUG FIX: previously this built the path from the null dataFolder,
                    // ignoring the freshly created temp directory.
                    File stacks = new File(tmpDir, "stacks");
                    cache = URLTransportCache.getCache(new Path(stacks.getAbsolutePath()));
                } catch (IOException ioe) {
                    LOG.error(ioe.getMessage(), ioe);
                }
            }
        }
        return cache;
    }

    /*
     * Read the stacks.yaml location from inside our client jar
     */
    private static String getStacksDefaultUrlFromJar() {
        return getUrlFromJar(StacksClientConfiguration.REPO_PROPERTY);
    }

    private static String getPreStacksDefaultUrlFromJar() {
        return getUrlFromJar(StacksClientConfiguration.PRESTACKS_REPO_PROPERTY);
    }

    /** Reads the given property from the stacks-client jar's bundled config. */
    private static String getUrlFromJar(String prop) {
        try (InputStream is = StacksManager.class.getResourceAsStream("/org/jboss/jdf/stacks/client/config.properties")) {
            Properties p = new Properties();
            p.load(is);
            return p.getProperty(prop);
        } catch (Exception e) {
            LOG.warn("Can't read stacks url from the stacks-client.jar", e); //$NON-NLS-1$
        }
        return null;
    }

    /** Bridges stacks-client messages onto this bundle's SLF4J logger. */
    private static class JBTStacksMessages implements StacksMessages {
        @Override
        public void showDebugMessage(String arg0) {
            LOG.trace(arg0);
        }

        @Override
        public void showInfoMessage(String arg0) {
            LOG.info(arg0);
        }

        @Override
        public void showErrorMessage(String arg0) {
            LOG.error(arg0);
        }

        @Override
        public void showErrorMessageWithCause(String arg0, Throwable t) {
            LOG.error(arg0, t);
        }

        @Override
        public void showWarnMessage(String arg0) {
            LOG.warn(arg0);
        }
    }
}
/*
 * Copyright 2017 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.jbpm.workbench.cm.client.list;

import java.util.function.Consumer;
import javax.annotation.PostConstruct;
import javax.enterprise.context.Dependent;
import javax.inject.Inject;

import com.google.gwt.user.client.TakesValue;
import org.jboss.errai.common.client.api.IsElement;
import org.jboss.errai.common.client.dom.Anchor;
import org.jboss.errai.common.client.dom.Button;
import org.jboss.errai.common.client.dom.Div;
import org.jboss.errai.common.client.dom.HTMLElement;
import org.jboss.errai.common.client.dom.MouseEvent;
import org.jboss.errai.common.client.dom.Span;
import org.jboss.errai.databinding.client.api.DataBinder;
import org.jboss.errai.ui.shared.api.annotations.AutoBound;
import org.jboss.errai.ui.shared.api.annotations.Bound;
import org.jboss.errai.ui.shared.api.annotations.DataField;
import org.jboss.errai.ui.shared.api.annotations.EventHandler;
import org.jboss.errai.ui.shared.api.annotations.ForEvent;
import org.jboss.errai.ui.shared.api.annotations.Templated;
import org.jbpm.workbench.cm.client.util.AbstractView;
import org.jbpm.workbench.cm.client.util.CaseStatusLabelConverter;
import org.jbpm.workbench.cm.util.CaseStatus;
import org.jbpm.workbench.cm.client.util.DateConverter;
import org.jbpm.workbench.cm.model.CaseInstanceSummary;

import static org.jboss.errai.common.client.dom.DOMUtil.addCSSClass;
import static org.jboss.errai.common.client.dom.DOMUtil.removeCSSClass;

/**
 * Templated Errai view for a single row in the case instance list. The bound
 * model is a {@link CaseInstanceSummary}; Errai data-binding populates the
 * template fields, and the cancel/close/select actions are delegated to the
 * {@link CaseInstanceListPresenter} - but only while the case is OPEN.
 */
@Dependent
@Templated(stylesheet = "CaseInstanceViewImpl.css")
public class CaseInstanceViewImpl extends AbstractView<CaseInstanceListPresenter> implements TakesValue<CaseInstanceSummary>,
                                                                                             IsElement {

    // Root element of the templated row; returned from getElement().
    @Inject
    @DataField("list-item")
    private Div row;

    // Bound to the model's caseId property (field name differs from property).
    @Inject
    @DataField("name")
    @Bound(property = "caseId")
    @SuppressWarnings("unused")
    private Span caseId;

    @Inject
    @DataField("description")
    @Bound
    @SuppressWarnings("unused")
    private Div description;

    @Inject
    @DataField("owner")
    @Bound
    @SuppressWarnings("unused")
    private Span owner;

    // Status label; converter renders the CaseStatus enum as display text.
    @Inject
    @DataField("status")
    @Bound(converter = CaseStatusLabelConverter.class)
    private Span status;

    @Inject
    @DataField("started")
    @Bound(converter = DateConverter.class)
    @SuppressWarnings("unused")
    private Span startedAt;

    @Inject
    @DataField("cancel")
    @SuppressWarnings("unused")
    private Anchor cancel;

    @Inject
    @DataField("close")
    private Button close;

    // Kebab (dropdown) menu container; hidden unless the case is open.
    @Inject
    @DataField("kebab")
    private Div kebab;

    @Inject
    @DataField("case-details")
    private Div details;

    // Errai data binder holding the CaseInstanceSummary model for this row.
    @Inject
    @AutoBound
    private DataBinder<CaseInstanceSummary> caseInstanceSummary;

    @PostConstruct
    public void init() {
        tooltip(status);
    }

    @Override
    public CaseInstanceSummary getValue() {
        return caseInstanceSummary.getModel();
    }

    /**
     * Binds the row to the given model. When the case is OPEN, the row is
     * styled as active and the close/kebab controls are revealed; for any
     * other status those CSS changes are skipped entirely.
     */
    @Override
    public void setValue(final CaseInstanceSummary model) {
        this.caseInstanceSummary.setModel(model);
        executeOnlyIfActive((c) -> {
            addCSSClass(this.details, "active");
            addCSSClass(this.status, "label-success");
            removeCSSClass(this.status, "label-default");
            removeCSSClass(this.close, "hidden");
            removeCSSClass(this.kebab, "hidden");
        });
    }

    @Override
    public HTMLElement getElement() {
        return row;
    }

    @EventHandler("cancel")
    public void onCancelClick(final @ForEvent("click") MouseEvent event) {
        executeOnlyIfActive((c) -> presenter.cancelCaseInstance(c));
    }

    @EventHandler("close")
    public void onCloseClick(final @ForEvent("click") MouseEvent event) {
        executeOnlyIfActive((c) -> presenter.closeCaseInstance(c));
    }

    @EventHandler("case-details")
    public void onCaseInstanceClick(final @ForEvent("click") MouseEvent event) {
        executeOnlyIfActive((c) -> presenter.selectCaseInstance(c));
    }

    // Runs the consumer only when the bound case instance is OPEN.
    // NOTE(review): the local variable intentionally shadows the DataBinder
    // field of the same name - it holds the model, not the binder.
    private void executeOnlyIfActive(final Consumer<CaseInstanceSummary> consumer) {
        final CaseInstanceSummary caseInstanceSummary = this.caseInstanceSummary.getModel();
        if (caseInstanceSummary.getStatus() == CaseStatus.OPEN) {
            consumer.accept(caseInstanceSummary);
        }
    }
}
package com.wugian.sissi.view; import android.content.Context; import android.content.res.Resources; import android.graphics.Bitmap; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Matrix; import android.graphics.Paint; import android.graphics.drawable.BitmapDrawable; import android.graphics.drawable.Drawable; import android.util.AttributeSet; import android.view.View; import com.wugian.sissi.R; public class SnowView extends View { private static final int NUM_SNOWFLAKES = 48; private static final int DELAY = 5; private SnowFlake[] snowflakes; public SnowView(Context context) { super(context); } public SnowView(Context context, AttributeSet attrs) { super(context, attrs); loadFlower(); } public SnowView(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); loadFlower(); } protected void resize(int width, int height) { loadFlower(); Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG); paint.setColor(Color.WHITE); paint.setStyle(Paint.Style.FILL); snowflakes = new SnowFlake[NUM_SNOWFLAKES]; for (int i = 0; i < NUM_SNOWFLAKES; i++) { snowflakes[i] = SnowFlake.create(width, height, paint, mFlowerList[i % mFlowerList.length]); // snowflakes[i].setFlowers(mFlowers); } } Bitmap mFlowers = null; int[] ids = new int[]{R.mipmap.ross_1, R.mipmap.rose_2, R.mipmap.rose_3, R.mipmap.rose_4}; Bitmap[] mFlowerList = new Bitmap[ids.length]; public void loadFlower() { Resources r = this.getContext().getResources(); Drawable drawable = r.getDrawable(R.mipmap.rose_2); mFlowers = (((BitmapDrawable) drawable).getBitmap()); Matrix matrix = new Matrix(); Random random = new Random(); float scale = 0.14f - random.getRandom(0.08f); matrix.postScale(scale, scale); // 得到新的图片 mFlowers = Bitmap.createBitmap(mFlowers, 0, 0, mFlowers.getWidth(), mFlowers.getHeight(), matrix, true); for (int i = 0; i < ids.length; i++) { mFlowerList[i] = loadFlowerItem(ids[i]); } } private Bitmap loadFlowerItem(int id) { Bitmap bm = null; 
Resources r = this.getContext().getResources(); Drawable drawable = r.getDrawable(id); bm = (((BitmapDrawable) drawable).getBitmap()); Matrix matrix = new Matrix(); Random random = new Random(); float scale = 0.2f - random.getRandom(0.1f); matrix.postScale(scale, scale); // 得到新的图片 bm = Bitmap.createBitmap(bm, 0, 0, bm.getWidth(), bm.getHeight(), matrix, true); return bm; } @Override protected void onSizeChanged(int w, int h, int oldw, int oldh) { super.onSizeChanged(w, h, oldw, oldh); if (w != oldw || h != oldh) { resize(w, h); } } @Override protected void onDraw(Canvas canvas) { super.onDraw(canvas); for (SnowFlake snowFlake : snowflakes) { snowFlake.draw(canvas); } getHandler().postDelayed(runnable, DELAY); } private Runnable runnable = new Runnable() { @Override public void run() { invalidate(); } }; }
/*
 * Copyright 2021 Slawomir Jaranowski
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.simplify4u.plugins;

import java.io.IOException;

import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;

import org.apache.maven.execution.MavenSession;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.testng.MockitoTestNGListener;
import org.simplify4u.plugins.keyserver.PGPKeysCache;
import org.slf4j.Logger;
import org.testng.annotations.Listeners;
import org.testng.annotations.Test;

/**
 * Unit tests for the shared {@code AbstractPGPMojo} template: skip handling,
 * logging via SLF4J, and key-cache initialization on execute().
 * NOTE(review): the @Test methods are package-private; TestNG conventionally
 * expects public test methods - confirm these actually run in the build.
 */
@Listeners(MockitoTestNGListener.class)
public class AbstractPGPMojoTest {

    // Minimal concrete mojo so the abstract template methods can be exercised.
    static class TestMojo extends AbstractPGPMojo {

        @Override
        protected String getMojoName() {
            return "testMojo";
        }

        @Override
        protected void executeConfiguredMojo() {
        }
    }

    @Mock
    private Logger logger;

    @Mock
    private PGPKeysCache pgpKeysCache;

    @Mock
    private MavenSession mavenSession;

    // Spy so executeConfiguredMojo() invocations can be verified.
    @Spy
    @InjectMocks
    private TestMojo mojo;

    @Test
    void getLogThrowException() {
        assertThatThrownBy(mojo::getLog)
                .isExactlyInstanceOf(UnsupportedOperationException.class)
                .hasMessage("SLF4J should be used directly");
    }

    @Test
    void shouldSkipExecution() throws MojoFailureException, MojoExecutionException {
        // given
        mojo.setSkip(true);

        // when
        mojo.execute();

        // then
        verify(mojo, never()).executeConfiguredMojo();
        verify(logger).info("Skipping pgpverify:{}", "testMojo");
        verifyNoInteractions(pgpKeysCache);
    }

    @Test
    void shouldExecute() throws MojoFailureException, MojoExecutionException, IOException {
        // when
        mojo.execute();

        // then
        verify(mojo).executeConfiguredMojo();
        verify(pgpKeysCache).init(isNull(), isNull(), eq(false), any());
        verifyNoInteractions(logger);
    }
}
/*
 * Copyright 2017 StreamSets Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.streamsets.pipeline.stage.common.mongodb;

import com.streamsets.pipeline.api.ErrorCode;
import com.streamsets.pipeline.api.GenerateResourceBundle;

/**
 * Error codes for the MongoDB stages. The enum constant name is the error code
 * ({@link #getCode()}); the constructor argument is the message template, with
 * {@code {}} placeholders filled in at reporting time. Message texts are
 * extracted into a resource bundle via {@code @GenerateResourceBundle}, so
 * codes and templates should be treated as a stable external contract.
 */
@GenerateResourceBundle
public enum Errors implements ErrorCode {
  MONGODB_00("Failed to create MongoClientURI: {}"),
  MONGODB_01("Failed to create MongoClient: {}"),
  MONGODB_02("Failed to get database: '{}'. {}"),
  MONGODB_03("Failed to get collection: '{}'. {}"),
  MONGODB_04("Collection isn't tailable because '{}' is not a capped collection."),
  MONGODB_05("Offset Field '{}' must be an instance of {}"),
  MONGODB_06("Error retrieving documents from collection: '{}'. {}"),
  MONGODB_07("Failed to get <host:port> for '{}'"),
  MONGODB_08("Failed to parse port: '{}'"),
  MONGODB_09("Unknown host: '{}'"),
  MONGODB_10("Failed to parse entry: {}"),
  MONGODB_11("Offset tracking field: '{}' missing from document: '{}'"),
  MONGODB_12("Error writing to database: {}"),
  MONGODB_13("Error serializing record '{}': {}"),
  MONGODB_14("Unsupported operation type '{}' found in record {}"),
  MONGODB_15("Operation type (insert, update or delete) is not specified in the header for record {}"),
  MONGODB_16("Record {} does not contain the expected unique key field {}"),
  MONGODB_17("Error writing records to Mongo : {}"),
  MONGODB_18("Operation '{}' requires unique key to be configured"),
  MONGODB_19("Initial Offset is required when the offset field is ObjectId type"),
  MONGODB_20("Unknown Offset type : {}"),
  MONGODB_21("Error parsing {} to Date"),
  MONGODB_30("Oplog Document Missing the follow mandatory fields '{}'"),
  MONGODB_31("Oplog Offset Invalid, Cannot parse offset '{}',"
      + " offset should be of the form 'time_t::ordinal'. Reason {}"),
  MONGODB_32("Invalid Initial Offset Value for '{}', should be greater than -1 if '{}' is not -1"),
  MONGODB_33("Invalid Oplog Collection Name '{}', Oplog collection should start with 'oplog.'"),
  MONGODB_34("Can't create credential object: {}"),
  ;

  // The raw message template for this error code.
  private final String msg;

  Errors(String msg) {
    this.msg = msg;
  }

  /** @return the error code (the enum constant name) */
  @Override
  public String getCode() {
    return name();
  }

  /** @return the message template associated with this code */
  @Override
  public String getMessage() {
    return msg;
  }
}
/* * Copyright (c) 2020-2021 CertifAI Sdn. Bhd. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 * */ package ai.certifai.solution.facial_recognition.identification.feature; import ai.certifai.solution.facial_recognition.detection.FaceLocalization; import ai.certifai.solution.facial_recognition.identification.Prediction; import org.bytedeco.opencv.opencv_core.Mat; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; public class FaceFeatureProvider implements IFaceFeatureProvider { @Override public INDArray getEmbeddings(INDArray arr) { return null; } @Override public ArrayList<LabelFeaturePair> setupAnchor(File classDict) throws IOException, ClassNotFoundException { return null; } public List<Prediction> predict(Mat image, FaceLocalization faceLocalization, double threshold, int numSamples) throws IOException { return null; } public int decodeLabelID(INDArray encoded) { int topX; topX = Nd4j.argMax(encoded.getRow(0).dup(), 1).getInt(0); encoded.getRow(0).dup().putScalar(0, topX, 0.0D); return topX; } }
/**
 * Copyright (C) 2017 Newland Group Holding Limited
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.newlandframework.rpc.compiler;

import javax.tools.DiagnosticCollector;
import javax.tools.JavaCompiler;
import javax.tools.JavaFileObject;
import javax.tools.StandardJavaFileManager;
import javax.tools.StandardLocation;
import javax.tools.ToolProvider;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.Locale;

/**
 * Compiles Java source code at runtime into a temp folder and loads the
 * resulting classes through a dedicated {@link URLClassLoader}.
 *
 * @author tangjie<https://github.com/tang-jie>
 * @filename:NativeCompiler.java
 * @description:NativeCompiler功能模块
 * @blogs http://www.cnblogs.com/jietang/
 * @since 2017/3/30
 */
public class NativeCompiler implements Closeable {

    // Output directory for generated .class files; also the class-loader root.
    private final File tempFolder;
    private final URLClassLoader classLoader;

    /**
     * @param tempFolder directory that receives compiled classes and backs the loader
     */
    public NativeCompiler(File tempFolder) {
        this.tempFolder = tempFolder;
        this.classLoader = createClassLoader(tempFolder);
    }

    // Builds a loader rooted at the temp folder; a bad path is a programming error.
    private static URLClassLoader createClassLoader(File tempFolder) {
        try {
            URL[] urls = {tempFolder.toURI().toURL()};
            return new URLClassLoader(urls);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    }

    /**
     * Compiles {@code code} as class {@code className} and loads it.
     *
     * @param className fully-qualified name of the class declared in {@code code}
     * @param code      the Java source text
     * @return the loaded class
     * @throws AssertionError wrapping any compilation or loading failure
     */
    public Class<?> compile(String className, String code) {
        try {
            JavaFileObject sourceFile = new StringJavaFileObject(className, code);
            compileClass(sourceFile);
            return classLoader.loadClass(className);
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    }

    /**
     * Runs the system Java compiler on {@code sourceFile}, writing classes to
     * {@link #tempFolder}.
     *
     * <p>Fixes over the original: the result of {@code task.call()} is now
     * checked (previously a failed compilation was silently ignored and only
     * surfaced later as {@code ClassNotFoundException}); a missing system
     * compiler (JRE instead of JDK) raises a clear error instead of an NPE;
     * and the file manager is closed via try-with-resources so a failure in
     * {@code getStandardFileManager} can no longer be masked by an NPE in
     * {@code finally}.
     *
     * @throws IOException           if the file manager fails
     * @throws IllegalStateException if no compiler is available or compilation fails
     */
    private void compileClass(JavaFileObject sourceFile) throws IOException {
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        if (compiler == null) {
            throw new IllegalStateException("No system Java compiler available; run on a JDK, not a JRE");
        }
        DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>();
        try (StandardJavaFileManager fileManager =
                     compiler.getStandardFileManager(collector, Locale.ROOT, null)) {
            fileManager.setLocation(StandardLocation.CLASS_OUTPUT, Arrays.asList(tempFolder));
            JavaCompiler.CompilationTask task =
                    compiler.getTask(null, fileManager, collector, null, null, Arrays.asList(sourceFile));
            if (!Boolean.TRUE.equals(task.call())) {
                // Surface the collected diagnostics instead of failing silently.
                throw new IllegalStateException("Compilation failed: " + collector.getDiagnostics());
            }
        }
    }

    /** Closes the class loader; failures are treated as programming errors. */
    @Override
    public void close() {
        try {
            classLoader.close();
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.codelibs.fesen.painless.action;

import org.codelibs.fesen.common.io.stream.Writeable;
import org.codelibs.fesen.common.xcontent.XContentParser;
import org.codelibs.fesen.painless.action.PainlessExecuteAction;
import org.codelibs.fesen.test.AbstractSerializingTestCase;

import java.io.IOException;

/**
 * Round-trip serialization tests for {@link PainlessExecuteAction.Response}:
 * the framework base class exercises both wire (Writeable) and XContent
 * serialization using the instances produced here.
 */
public class PainlessExecuteResponseTests extends AbstractSerializingTestCase<PainlessExecuteAction.Response> {

    /** Wire deserializer used by the framework for stream round-trips. */
    @Override
    protected Writeable.Reader<PainlessExecuteAction.Response> instanceReader() {
        return PainlessExecuteAction.Response::new;
    }

    /**
     * Builds a random response whose result is one of the three value shapes
     * the response supports: string, boolean, or double.
     */
    @Override
    protected PainlessExecuteAction.Response createTestInstance() {
        Object result;
        switch (randomIntBetween(0, 2)) {
        case 0:
            result = randomAlphaOfLength(10);
            break;
        case 1:
            result = randomBoolean();
            break;
        case 2:
            result = randomDoubleBetween(-10, 10, true);
            break;
        default:
            // randomIntBetween(0, 2) cannot produce anything else.
            throw new IllegalStateException("invalid branch");
        }
        return new PainlessExecuteAction.Response(result);
    }

    /**
     * Parses a response of the form {"result": value}, dispatching on the
     * token type of the value to recover the original string/boolean/double.
     */
    @Override
    protected PainlessExecuteAction.Response doParseInstance(XContentParser parser) throws IOException {
        parser.nextToken(); // START-OBJECT
        parser.nextToken(); // FIELD-NAME
        XContentParser.Token token = parser.nextToken(); // result value
        Object result;
        switch (token) {
        case VALUE_STRING:
            result = parser.text();
            break;
        case VALUE_BOOLEAN:
            result = parser.booleanValue();
            break;
        case VALUE_NUMBER:
            result = parser.doubleValue();
            break;
        default:
            throw new IOException("invalid response");
        }
        return new PainlessExecuteAction.Response(result);
    }
}
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.api.ads.admanager.jaxws.v201802; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * * The action used for disapproving {@link Order} objects. All {@link LineItem} * objects within the order will be disapproved as well. * * * <p>Java class for DisapproveOrders complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="DisapproveOrders"> * &lt;complexContent> * &lt;extension base="{https://www.google.com/apis/ads/publisher/v201802}OrderAction"> * &lt;sequence> * &lt;/sequence> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "DisapproveOrders") public class DisapproveOrders extends OrderAction { }
package com.friday.productservice.repository; import com.friday.productservice.entity.Product; import org.springframework.data.jpa.repository.JpaRepository; import org.springframework.stereotype.Repository; @Repository public interface ProductRepository extends JpaRepository<Product, Long> { }
package com.cms.model;

import com.cms.model.ChartType;
import com.cms.model.Constant;
import com.cms.model.CoreEntity;

import javax.persistence.*;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

/**
 * JPA entity describing one dashboard chart: the query it runs, its axis
 * bounds, chart type, legend placement, and a comma-separated series-tag list.
 *
 * Created by sdrahnea
 */
@Entity
@Table(name = "dashboard")
public class Dashboard extends CoreEntity {

    // Chart data query; @Lob because queries can exceed a plain VARCHAR.
    @Lob
    @Column(name = "cquery")
    private String query;

    // Upper bound of the Y axis (nullable = auto-scale, presumably — confirm with renderer).
    @Column(name = "ymax")
    private Long ymax;

    // Lower bound of the Y axis.
    @Column(name = "ymin")
    private Long ymin;

    @JoinColumn(name = "chart_type_id", referencedColumnName = "id")
    @ManyToOne(fetch = FetchType.EAGER)
    private ChartType chartType;

    @Column(name = "legend_position")
    private String legendPosition;

    @JoinColumn(name = "show_constant_id", referencedColumnName = "id")
    @ManyToOne(fetch = FetchType.EAGER)
    private Constant show;

    @JoinColumn(name = "animate_constant_id", referencedColumnName = "id")
    @ManyToOne(fetch = FetchType.EAGER)
    private Constant animate;

    @Column(name = "show_column")
    private String showColumn;

    // Series tags persisted as one comma-separated string; see get/setSeriesList.
    @Column(name = "series_tags")
    private String seriesTags;

    /**
     * Returns the series tags split out of {@link #seriesTags}, or an empty
     * list when none are stored.
     */
    public List<String> getSeriesList() {
        return seriesTags != null ? Arrays.asList(seriesTags.split(",")) : new LinkedList<>();
    }

    /**
     * Stores the given tags as a comma-separated string.
     *
     * <p>Fixes over the original: null-safe ({@code String.join} previously
     * threw NPE on a null list — null now clears the column) and the
     * copy-pasted parameter name {@code skillList} is corrected.
     *
     * @param seriesList tags to persist; null clears the stored tags
     */
    public void setSeriesList(List<String> seriesList) {
        this.seriesTags = seriesList != null ? String.join(",", seriesList) : null;
    }

    public String getQuery() {
        return query;
    }

    public void setQuery(String query) {
        this.query = query;
    }

    public Long getYmax() {
        return ymax;
    }

    public void setYmax(Long ymax) {
        this.ymax = ymax;
    }

    public Long getYmin() {
        return ymin;
    }

    public void setYmin(Long ymin) {
        this.ymin = ymin;
    }

    public String getLegendPosition() {
        return legendPosition;
    }

    public void setLegendPosition(String legendPosition) {
        this.legendPosition = legendPosition;
    }

    public String getSeriesTags() {
        return seriesTags;
    }

    public void setSeriesTags(String seriesTags) {
        this.seriesTags = seriesTags;
    }

    public ChartType getChartType() {
        return chartType;
    }

    public void setChartType(ChartType chartType) {
        this.chartType = chartType;
    }

    public Constant getShow() {
        return show;
    }

    public void setShow(Constant show) {
        this.show = show;
    }

    public Constant getAnimate() {
        return animate;
    }

    public void setAnimate(Constant animate) {
        this.animate = animate;
    }

    public String getShowColumn() {
        return showColumn;
    }

    public void setShowColumn(String showColumn) {
        this.showColumn = showColumn;
    }
}
package cz.cuni.mff.auv.problem5; import cz.cuni.mff.auv.domain.Domain; import cz.cuni.mff.auv.domain.State; import cz.cuni.mff.auv.problem.AuvProblem; import cz.cuni.mff.auv.problem1.DeadEnd; import cz.cuni.mff.jpddl.PDDLDeadEnd; public final class Problem extends AuvProblem { static { // ENSURE STATIC INITIALIZATION OF THE CLASSES new E_Auvs(); new E_Locations(); new E_Resources(); new E_Ships(); new E_Vehicles(); } public Domain domain; public State state; public Goal goal; public DeadEnd deadEnd; public Problem() { domain = new Domain(); state = new State(); goal = new Goal(); deadEnd = new DeadEnd(); state.p_ActTurn.set(); state.p_Operational.set(E_Auvs.a); state.p_Outside.set(E_Ships.s2); state.p_Outside.set(E_Ships.s); state.p_At.set(E_Auvs.a, E_Locations.l_1_1); state.p_AtRes.set(E_Resources.r2, E_Locations.l_14_12); state.p_AtRes.set(E_Resources.r3, E_Locations.l_16_4); state.p_AtRes.set(E_Resources.r1, E_Locations.l_4_14); state.p_Free.set(E_Locations.l_6_8); state.p_Free.set(E_Locations.l_8_3); state.p_Free.set(E_Locations.l_3_6); state.p_Free.set(E_Locations.l_2_11); state.p_Free.set(E_Locations.l_5_7); state.p_Free.set(E_Locations.l_12_13); state.p_Free.set(E_Locations.l_1_3); state.p_Free.set(E_Locations.l_3_16); state.p_Free.set(E_Locations.l_16_3); state.p_Free.set(E_Locations.l_6_14); state.p_Free.set(E_Locations.l_4_1); state.p_Free.set(E_Locations.l_15_7); state.p_Free.set(E_Locations.l_2_10); state.p_Free.set(E_Locations.l_8_5); state.p_Free.set(E_Locations.l_2_9); state.p_Free.set(E_Locations.l_7_16); state.p_Free.set(E_Locations.l_10_12); state.p_Free.set(E_Locations.l_8_13); state.p_Free.set(E_Locations.l_5_14); state.p_Free.set(E_Locations.l_12_1); state.p_Free.set(E_Locations.l_13_10); state.p_Free.set(E_Locations.l_16_14); state.p_Free.set(E_Locations.l_10_14); state.p_Free.set(E_Locations.l_5_11); state.p_Free.set(E_Locations.l_3_12); state.p_Free.set(E_Locations.l_13_4); state.p_Free.set(E_Locations.l_5_3); 
state.p_Free.set(E_Locations.l_16_4); state.p_Free.set(E_Locations.l_6_13); state.p_Free.set(E_Locations.l_10_16); state.p_Free.set(E_Locations.l_7_11); state.p_Free.set(E_Locations.l_8_16); state.p_Free.set(E_Locations.l_10_10); state.p_Free.set(E_Locations.l_14_16); state.p_Free.set(E_Locations.l_11_5); state.p_Free.set(E_Locations.l_4_10); state.p_Free.set(E_Locations.l_16_11); state.p_Free.set(E_Locations.l_11_3); state.p_Free.set(E_Locations.l_11_9); state.p_Free.set(E_Locations.l_16_15); state.p_Free.set(E_Locations.l_4_2); state.p_Free.set(E_Locations.l_3_13); state.p_Free.set(E_Locations.l_5_6); state.p_Free.set(E_Locations.l_2_6); state.p_Free.set(E_Locations.l_15_16); state.p_Free.set(E_Locations.l_12_6); state.p_Free.set(E_Locations.l_4_9); state.p_Free.set(E_Locations.l_16_2); state.p_Free.set(E_Locations.l_11_16); state.p_Free.set(E_Locations.l_12_10); state.p_Free.set(E_Locations.l_3_7); state.p_Free.set(E_Locations.l_14_4); state.p_Free.set(E_Locations.l_1_7); state.p_Free.set(E_Locations.l_1_2); state.p_Free.set(E_Locations.l_15_3); state.p_Free.set(E_Locations.l_6_9); state.p_Free.set(E_Locations.l_8_1); state.p_Free.set(E_Locations.l_14_12); state.p_Free.set(E_Locations.l_2_13); state.p_Free.set(E_Locations.l_4_7); state.p_Free.set(E_Locations.l_1_6); state.p_Free.set(E_Locations.l_12_4); state.p_Free.set(E_Locations.l_16_8); state.p_Free.set(E_Locations.l_13_14); state.p_Free.set(E_Locations.l_2_4); state.p_Free.set(E_Locations.l_10_7); state.p_Free.set(E_Locations.l_3_3); state.p_Free.set(E_Locations.l_3_15); state.p_Free.set(E_Locations.l_7_6); state.p_Free.set(E_Locations.l_4_13); state.p_Free.set(E_Locations.l_2_1); state.p_Free.set(E_Locations.l_9_7); state.p_Free.set(E_Locations.l_4_12); state.p_Free.set(E_Locations.l_13_1); state.p_Free.set(E_Locations.l_10_8); state.p_Free.set(E_Locations.l_14_13); state.p_Free.set(E_Locations.l_16_16); state.p_Free.set(E_Locations.l_6_1); state.p_Free.set(E_Locations.l_13_12); 
state.p_Free.set(E_Locations.l_12_7); state.p_Free.set(E_Locations.l_13_3); state.p_Free.set(E_Locations.l_7_13); state.p_Free.set(E_Locations.l_11_15); state.p_Free.set(E_Locations.l_7_7); state.p_Free.set(E_Locations.l_4_4); state.p_Free.set(E_Locations.l_10_2); state.p_Free.set(E_Locations.l_2_8); state.p_Free.set(E_Locations.l_10_3); state.p_Free.set(E_Locations.l_5_2); state.p_Free.set(E_Locations.l_12_16); state.p_Free.set(E_Locations.l_16_6); state.p_Free.set(E_Locations.l_1_15); state.p_Free.set(E_Locations.l_9_5); state.p_Free.set(E_Locations.l_14_15); state.p_Free.set(E_Locations.l_3_9); state.p_Free.set(E_Locations.l_10_15); state.p_Free.set(E_Locations.l_6_12); state.p_Free.set(E_Locations.l_14_8); state.p_Free.set(E_Locations.l_7_9); state.p_Free.set(E_Locations.l_7_12); state.p_Free.set(E_Locations.l_12_15); state.p_Free.set(E_Locations.l_1_5); state.p_Free.set(E_Locations.l_2_12); state.p_Free.set(E_Locations.l_4_16); state.p_Free.set(E_Locations.l_14_5); state.p_Free.set(E_Locations.l_14_7); state.p_Free.set(E_Locations.l_7_15); state.p_Free.set(E_Locations.l_15_15); state.p_Free.set(E_Locations.l_3_11); state.p_Free.set(E_Locations.l_11_14); state.p_Free.set(E_Locations.l_1_4); state.p_Free.set(E_Locations.l_6_4); state.p_Free.set(E_Locations.l_13_2); state.p_Free.set(E_Locations.l_9_8); state.p_Free.set(E_Locations.l_11_12); state.p_Free.set(E_Locations.l_8_12); state.p_Free.set(E_Locations.l_15_11); state.p_Free.set(E_Locations.l_12_14); state.p_Free.set(E_Locations.l_15_9); state.p_Free.set(E_Locations.l_1_16); state.p_Free.set(E_Locations.l_5_13); state.p_Free.set(E_Locations.l_15_5); state.p_Free.set(E_Locations.l_11_8); state.p_Free.set(E_Locations.l_9_6); state.p_Free.set(E_Locations.l_13_9); state.p_Free.set(E_Locations.l_16_5); state.p_Free.set(E_Locations.l_9_16); state.p_Free.set(E_Locations.l_14_11); state.p_Free.set(E_Locations.l_5_1); state.p_Free.set(E_Locations.l_10_11); state.p_Free.set(E_Locations.l_16_1); 
state.p_Free.set(E_Locations.l_8_11); state.p_Free.set(E_Locations.l_11_4); state.p_Free.set(E_Locations.l_7_3); state.p_Free.set(E_Locations.l_9_3); state.p_Free.set(E_Locations.l_5_8); state.p_Free.set(E_Locations.l_5_5); state.p_Free.set(E_Locations.l_14_1); state.p_Free.set(E_Locations.l_5_15); state.p_Free.set(E_Locations.l_5_12); state.p_Free.set(E_Locations.l_13_16); state.p_Free.set(E_Locations.l_2_14); state.p_Free.set(E_Locations.l_10_5); state.p_Free.set(E_Locations.l_1_10); state.p_Free.set(E_Locations.l_3_14); state.p_Free.set(E_Locations.l_3_2); state.p_Free.set(E_Locations.l_4_6); state.p_Free.set(E_Locations.l_1_13); state.p_Free.set(E_Locations.l_1_9); state.p_Free.set(E_Locations.l_16_9); state.p_Free.set(E_Locations.l_7_5); state.p_Free.set(E_Locations.l_3_1); state.p_Free.set(E_Locations.l_8_6); state.p_Free.set(E_Locations.l_15_1); state.p_Free.set(E_Locations.l_1_8); state.p_Free.set(E_Locations.l_16_10); state.p_Free.set(E_Locations.l_11_13); state.p_Free.set(E_Locations.l_13_15); state.p_Free.set(E_Locations.l_4_8); state.p_Free.set(E_Locations.l_9_11); state.p_Free.set(E_Locations.l_8_7); state.p_Free.set(E_Locations.l_2_16); state.p_Free.set(E_Locations.l_13_6); state.p_Free.set(E_Locations.l_8_4); state.p_Free.set(E_Locations.l_15_6); state.p_Free.set(E_Locations.l_11_7); state.p_Free.set(E_Locations.l_11_1); state.p_Free.set(E_Locations.l_1_11); state.p_Free.set(E_Locations.l_2_2); state.p_Free.set(E_Locations.l_12_9); state.p_Free.set(E_Locations.l_15_2); state.p_Free.set(E_Locations.l_8_10); state.p_Free.set(E_Locations.l_9_13); state.p_Free.set(E_Locations.l_6_3); state.p_Free.set(E_Locations.l_10_1); state.p_Free.set(E_Locations.l_13_8); state.p_Free.set(E_Locations.l_4_5); state.p_Free.set(E_Locations.l_11_6); state.p_Free.set(E_Locations.l_8_15); state.p_Free.set(E_Locations.l_6_10); state.p_Free.set(E_Locations.l_6_2); state.p_Free.set(E_Locations.l_16_7); state.p_Free.set(E_Locations.l_14_9); state.p_Free.set(E_Locations.l_9_10); 
state.p_Free.set(E_Locations.l_13_7); state.p_Free.set(E_Locations.l_3_8); state.p_Free.set(E_Locations.l_5_10); state.p_Free.set(E_Locations.l_7_2); state.p_Free.set(E_Locations.l_9_14); state.p_Free.set(E_Locations.l_8_8); state.p_Free.set(E_Locations.l_7_10); state.p_Free.set(E_Locations.l_6_6); state.p_Free.set(E_Locations.l_12_2); state.p_Free.set(E_Locations.l_15_4); state.p_Free.set(E_Locations.l_13_11); state.p_Free.set(E_Locations.l_6_7); state.p_Free.set(E_Locations.l_2_5); state.p_Free.set(E_Locations.l_10_13); state.p_Free.set(E_Locations.l_14_10); state.p_Free.set(E_Locations.l_1_14); state.p_Free.set(E_Locations.l_12_3); state.p_Free.set(E_Locations.l_16_12); state.p_Free.set(E_Locations.l_6_5); state.p_Free.set(E_Locations.l_1_12); state.p_Free.set(E_Locations.l_14_14); state.p_Free.set(E_Locations.l_5_4); state.p_Free.set(E_Locations.l_10_4); state.p_Free.set(E_Locations.l_4_14); state.p_Free.set(E_Locations.l_2_7); state.p_Free.set(E_Locations.l_2_15); state.p_Free.set(E_Locations.l_9_9); state.p_Free.set(E_Locations.l_11_11); state.p_Free.set(E_Locations.l_15_14); state.p_Free.set(E_Locations.l_10_6); state.p_Free.set(E_Locations.l_9_15); state.p_Free.set(E_Locations.l_4_11); state.p_Free.set(E_Locations.l_12_12); state.p_Free.set(E_Locations.l_9_2); state.p_Free.set(E_Locations.l_15_8); state.p_Free.set(E_Locations.l_3_10); state.p_Free.set(E_Locations.l_15_13); state.p_Free.set(E_Locations.l_13_5); state.p_Free.set(E_Locations.l_5_16); state.p_Free.set(E_Locations.l_13_13); state.p_Free.set(E_Locations.l_9_1); state.p_Free.set(E_Locations.l_7_4); state.p_Free.set(E_Locations.l_2_3); state.p_Free.set(E_Locations.l_3_5); state.p_Free.set(E_Locations.l_6_15); state.p_Free.set(E_Locations.l_4_3); state.p_Free.set(E_Locations.l_8_14); state.p_Free.set(E_Locations.l_15_10); state.p_Free.set(E_Locations.l_16_13); state.p_Free.set(E_Locations.l_7_14); state.p_Free.set(E_Locations.l_10_9); state.p_Free.set(E_Locations.l_5_9); 
state.p_Free.set(E_Locations.l_7_8); state.p_Free.set(E_Locations.l_11_2); state.p_Free.set(E_Locations.l_7_1); state.p_Free.set(E_Locations.l_3_4); state.p_Free.set(E_Locations.l_11_10); state.p_Free.set(E_Locations.l_14_2); state.p_Free.set(E_Locations.l_6_16); state.p_Free.set(E_Locations.l_12_8); state.p_Free.set(E_Locations.l_14_3); state.p_Free.set(E_Locations.l_9_4); state.p_Free.set(E_Locations.l_6_11); state.p_Free.set(E_Locations.l_8_9); state.p_Free.set(E_Locations.l_12_11); state.p_Free.set(E_Locations.l_9_12); state.p_Free.set(E_Locations.l_14_6); state.p_Free.set(E_Locations.l_12_5); state.p_Free.set(E_Locations.l_15_12); state.p_Free.set(E_Locations.l_4_15); state.p_Free.set(E_Locations.l_8_2); state.p_DupFree.set(E_Locations.l_13_12); state.p_DupFree.set(E_Locations.l_6_7); state.p_DupFree.set(E_Locations.l_7_6); state.p_DupFree.set(E_Locations.l_9_5); state.p_DupFree.set(E_Locations.l_1_15); state.p_DupFree.set(E_Locations.l_12_16); state.p_DupFree.set(E_Locations.l_13_14); state.p_DupFree.set(E_Locations.l_6_2); state.p_DupFree.set(E_Locations.l_9_14); state.p_DupFree.set(E_Locations.l_10_3); state.p_DupFree.set(E_Locations.l_16_7); state.p_DupFree.set(E_Locations.l_7_7); state.p_DupFree.set(E_Locations.l_6_10); state.p_DupFree.set(E_Locations.l_8_1); state.p_DupFree.set(E_Locations.l_10_2); state.p_DupFree.set(E_Locations.l_1_7); state.p_DupFree.set(E_Locations.l_4_5); state.p_DupFree.set(E_Locations.l_16_16); state.p_DupFree.set(E_Locations.l_3_7); state.p_DupFree.set(E_Locations.l_1_11); state.p_DupFree.set(E_Locations.l_16_12); state.p_DupFree.set(E_Locations.l_10_8); state.p_DupFree.set(E_Locations.l_12_7); state.p_DupFree.set(E_Locations.l_15_6); state.p_DupFree.set(E_Locations.l_12_2); state.p_DupFree.set(E_Locations.l_4_9); state.p_DupFree.set(E_Locations.l_10_13); state.p_DupFree.set(E_Locations.l_9_7); state.p_DupFree.set(E_Locations.l_2_5); state.p_DupFree.set(E_Locations.l_14_4); state.p_DupFree.set(E_Locations.l_4_13); 
state.p_DupFree.set(E_Locations.l_2_2); state.p_DupFree.set(E_Locations.l_12_9); state.p_DupFree.set(E_Locations.l_4_2); state.p_DupFree.set(E_Locations.l_2_16); state.p_DupFree.set(E_Locations.l_3_3); state.p_DupFree.set(E_Locations.l_14_9); state.p_DupFree.set(E_Locations.l_13_1); state.p_DupFree.set(E_Locations.l_9_11); state.p_DupFree.set(E_Locations.l_7_10); state.p_DupFree.set(E_Locations.l_11_9); state.p_DupFree.set(E_Locations.l_5_3); state.p_DupFree.set(E_Locations.l_8_15); state.p_DupFree.set(E_Locations.l_12_4); state.p_DupFree.set(E_Locations.l_10_7); state.p_DupFree.set(E_Locations.l_2_4); state.p_DupFree.set(E_Locations.l_3_8); state.p_DupFree.set(E_Locations.l_3_12); state.p_DupFree.set(E_Locations.l_9_10); state.p_DupFree.set(E_Locations.l_1_8); state.p_DupFree.set(E_Locations.l_6_13); state.p_DupFree.set(E_Locations.l_10_12); state.p_DupFree.set(E_Locations.l_13_4); state.p_DupFree.set(E_Locations.l_4_1); state.p_DupFree.set(E_Locations.l_16_10); state.p_DupFree.set(E_Locations.l_6_3); state.p_DupFree.set(E_Locations.l_8_10); state.p_DupFree.set(E_Locations.l_8_6); state.p_DupFree.set(E_Locations.l_2_10); state.p_DupFree.set(E_Locations.l_15_3); state.p_DupFree.set(E_Locations.l_11_16); state.p_DupFree.set(E_Locations.l_15_2); state.p_DupFree.set(E_Locations.l_12_13); state.p_DupFree.set(E_Locations.l_14_16); state.p_DupFree.set(E_Locations.l_9_4); state.p_DupFree.set(E_Locations.l_15_16); state.p_DupFree.set(E_Locations.l_10_16); state.p_DupFree.set(E_Locations.l_6_16); state.p_DupFree.set(E_Locations.l_16_11); state.p_DupFree.set(E_Locations.l_4_10); state.p_DupFree.set(E_Locations.l_11_5); state.p_DupFree.set(E_Locations.l_9_12); state.p_DupFree.set(E_Locations.l_10_9); state.p_DupFree.set(E_Locations.l_14_3); state.p_DupFree.set(E_Locations.l_12_8); state.p_DupFree.set(E_Locations.l_8_16); state.p_DupFree.set(E_Locations.l_10_10); state.p_DupFree.set(E_Locations.l_5_8); state.p_DupFree.set(E_Locations.l_9_3); 
state.p_DupFree.set(E_Locations.l_7_14); state.p_DupFree.set(E_Locations.l_16_4); state.p_DupFree.set(E_Locations.l_4_8); state.p_DupFree.set(E_Locations.l_14_1); state.p_DupFree.set(E_Locations.l_11_13); state.p_DupFree.set(E_Locations.l_5_11); state.p_DupFree.set(E_Locations.l_6_14); state.p_DupFree.set(E_Locations.l_15_1); state.p_DupFree.set(E_Locations.l_10_14); state.p_DupFree.set(E_Locations.l_5_14); state.p_DupFree.set(E_Locations.l_1_10); state.p_DupFree.set(E_Locations.l_16_9); state.p_DupFree.set(E_Locations.l_10_5); state.p_DupFree.set(E_Locations.l_5_7); state.p_DupFree.set(E_Locations.l_15_5); state.p_DupFree.set(E_Locations.l_5_1); state.p_DupFree.set(E_Locations.l_14_11); state.p_DupFree.set(E_Locations.l_8_2); state.p_DupFree.set(E_Locations.l_1_3); state.p_DupFree.set(E_Locations.l_9_6); state.p_DupFree.set(E_Locations.l_9_2); state.p_DupFree.set(E_Locations.l_4_11); state.p_DupFree.set(E_Locations.l_2_11); state.p_DupFree.set(E_Locations.l_8_9); state.p_DupFree.set(E_Locations.l_13_16); state.p_DupFree.set(E_Locations.l_4_15); state.p_DupFree.set(E_Locations.l_9_9); state.p_DupFree.set(E_Locations.l_2_7); state.p_DupFree.set(E_Locations.l_5_9); state.p_DupFree.set(E_Locations.l_9_15); state.p_DupFree.set(E_Locations.l_15_14); state.p_DupFree.set(E_Locations.l_6_5); state.p_DupFree.set(E_Locations.l_11_12); state.p_DupFree.set(E_Locations.l_12_3); state.p_DupFree.set(E_Locations.l_5_5); state.p_DupFree.set(E_Locations.l_10_4); state.p_DupFree.set(E_Locations.l_7_3); state.p_DupFree.set(E_Locations.l_8_11); state.p_DupFree.set(E_Locations.l_2_12); state.p_DupFree.set(E_Locations.l_1_14); state.p_DupFree.set(E_Locations.l_16_1); state.p_DupFree.set(E_Locations.l_14_7); state.p_DupFree.set(E_Locations.l_8_14); state.p_DupFree.set(E_Locations.l_13_13); state.p_DupFree.set(E_Locations.l_13_9); state.p_DupFree.set(E_Locations.l_5_13); state.p_DupFree.set(E_Locations.l_8_8); state.p_DupFree.set(E_Locations.l_13_11); 
state.p_DupFree.set(E_Locations.l_15_8); state.p_DupFree.set(E_Locations.l_10_15); state.p_DupFree.set(E_Locations.l_7_4); state.p_DupFree.set(E_Locations.l_5_16); state.p_DupFree.set(E_Locations.l_7_9); state.p_DupFree.set(E_Locations.l_5_2); state.p_DupFree.set(E_Locations.l_14_8); state.p_DupFree.set(E_Locations.l_6_12); state.p_DupFree.set(E_Locations.l_5_4); state.p_DupFree.set(E_Locations.l_6_4); state.p_DupFree.set(E_Locations.l_8_12); state.p_DupFree.set(E_Locations.l_15_15); state.p_DupFree.set(E_Locations.l_4_4); state.p_DupFree.set(E_Locations.l_14_10); state.p_DupFree.set(E_Locations.l_13_2); state.p_DupFree.set(E_Locations.l_10_1); state.p_DupFree.set(E_Locations.l_4_14); state.p_DupFree.set(E_Locations.l_14_14); state.p_DupFree.set(E_Locations.l_11_14); state.p_DupFree.set(E_Locations.l_11_1); state.p_DupFree.set(E_Locations.l_15_4); state.p_DupFree.set(E_Locations.l_11_7); state.p_DupFree.set(E_Locations.l_6_6); state.p_DupFree.set(E_Locations.l_12_15); state.p_DupFree.set(E_Locations.l_3_15); state.p_DupFree.set(E_Locations.l_7_2); state.p_DupFree.set(E_Locations.l_11_6); state.p_DupFree.set(E_Locations.l_5_10); state.p_DupFree.set(E_Locations.l_2_8); state.p_DupFree.set(E_Locations.l_13_7); state.p_DupFree.set(E_Locations.l_13_8); state.p_DupFree.set(E_Locations.l_1_6); state.p_DupFree.set(E_Locations.l_7_5); state.p_DupFree.set(E_Locations.l_13_15); state.p_DupFree.set(E_Locations.l_9_13); state.p_DupFree.set(E_Locations.l_11_15); state.p_DupFree.set(E_Locations.l_1_13); state.p_DupFree.set(E_Locations.l_4_6); state.p_DupFree.set(E_Locations.l_3_2); state.p_DupFree.set(E_Locations.l_1_9); state.p_DupFree.set(E_Locations.l_7_13); state.p_DupFree.set(E_Locations.l_14_13); state.p_DupFree.set(E_Locations.l_14_12); state.p_DupFree.set(E_Locations.l_13_6); state.p_DupFree.set(E_Locations.l_3_14); state.p_DupFree.set(E_Locations.l_4_12); state.p_DupFree.set(E_Locations.l_2_1); state.p_DupFree.set(E_Locations.l_8_4); 
state.p_DupFree.set(E_Locations.l_11_3); state.p_DupFree.set(E_Locations.l_5_12); state.p_DupFree.set(E_Locations.l_5_15); state.p_DupFree.set(E_Locations.l_8_7); state.p_DupFree.set(E_Locations.l_3_13); state.p_DupFree.set(E_Locations.l_5_6); state.p_DupFree.set(E_Locations.l_16_8); state.p_DupFree.set(E_Locations.l_11_4); state.p_DupFree.set(E_Locations.l_2_13); state.p_DupFree.set(E_Locations.l_7_11); state.p_DupFree.set(E_Locations.l_16_14); state.p_DupFree.set(E_Locations.l_13_10); state.p_DupFree.set(E_Locations.l_12_1); state.p_DupFree.set(E_Locations.l_9_16); state.p_DupFree.set(E_Locations.l_6_9); state.p_DupFree.set(E_Locations.l_1_2); state.p_DupFree.set(E_Locations.l_4_7); state.p_DupFree.set(E_Locations.l_3_1); state.p_DupFree.set(E_Locations.l_11_8); state.p_DupFree.set(E_Locations.l_16_3); state.p_DupFree.set(E_Locations.l_2_9); state.p_DupFree.set(E_Locations.l_15_7); state.p_DupFree.set(E_Locations.l_1_16); state.p_DupFree.set(E_Locations.l_12_10); state.p_DupFree.set(E_Locations.l_15_9); state.p_DupFree.set(E_Locations.l_16_2); state.p_DupFree.set(E_Locations.l_12_14); state.p_DupFree.set(E_Locations.l_12_6); state.p_DupFree.set(E_Locations.l_2_14); state.p_DupFree.set(E_Locations.l_15_12); state.p_DupFree.set(E_Locations.l_12_5); state.p_DupFree.set(E_Locations.l_14_6); state.p_DupFree.set(E_Locations.l_6_11); state.p_DupFree.set(E_Locations.l_6_8); state.p_DupFree.set(E_Locations.l_9_8); state.p_DupFree.set(E_Locations.l_2_6); state.p_DupFree.set(E_Locations.l_16_15); state.p_DupFree.set(E_Locations.l_1_4); state.p_DupFree.set(E_Locations.l_7_15); state.p_DupFree.set(E_Locations.l_10_11); state.p_DupFree.set(E_Locations.l_14_5); state.p_DupFree.set(E_Locations.l_11_10); state.p_DupFree.set(E_Locations.l_8_13); state.p_DupFree.set(E_Locations.l_16_5); state.p_DupFree.set(E_Locations.l_16_13); state.p_DupFree.set(E_Locations.l_11_2); state.p_DupFree.set(E_Locations.l_3_16); state.p_DupFree.set(E_Locations.l_4_3); 
state.p_DupFree.set(E_Locations.l_6_15); state.p_DupFree.set(E_Locations.l_7_16); state.p_DupFree.set(E_Locations.l_3_5); state.p_DupFree.set(E_Locations.l_8_5); state.p_DupFree.set(E_Locations.l_15_10); state.p_DupFree.set(E_Locations.l_1_5); state.p_DupFree.set(E_Locations.l_8_3); state.p_DupFree.set(E_Locations.l_2_3); state.p_DupFree.set(E_Locations.l_9_1); state.p_DupFree.set(E_Locations.l_15_11); state.p_DupFree.set(E_Locations.l_13_5); state.p_DupFree.set(E_Locations.l_15_13); state.p_DupFree.set(E_Locations.l_16_6); state.p_DupFree.set(E_Locations.l_7_12); state.p_DupFree.set(E_Locations.l_3_6); state.p_DupFree.set(E_Locations.l_10_6); state.p_DupFree.set(E_Locations.l_3_9); state.p_DupFree.set(E_Locations.l_14_15); state.p_DupFree.set(E_Locations.l_11_11); state.p_DupFree.set(E_Locations.l_14_2); state.p_DupFree.set(E_Locations.l_3_10); state.p_DupFree.set(E_Locations.l_3_4); state.p_DupFree.set(E_Locations.l_12_12); state.p_DupFree.set(E_Locations.l_12_11); state.p_DupFree.set(E_Locations.l_1_12); state.p_DupFree.set(E_Locations.l_13_3); state.p_DupFree.set(E_Locations.l_4_16); state.p_DupFree.set(E_Locations.l_7_1); state.p_DupFree.set(E_Locations.l_2_15); state.p_DupFree.set(E_Locations.l_3_11); state.p_DupFree.set(E_Locations.l_6_1); state.p_DupFree.set(E_Locations.l_7_8); state.p_Connected.set(E_Locations.l_6_8, E_Locations.l_6_7); state.p_Connected.set(E_Locations.l_6_6, E_Locations.l_7_6); state.p_Connected.set(E_Locations.l_4_14, E_Locations.l_4_13); state.p_Connected.set(E_Locations.l_15_12, E_Locations.l_16_12); state.p_Connected.set(E_Locations.l_6_15, E_Locations.l_6_14); state.p_Connected.set(E_Locations.l_3_2, E_Locations.l_2_2); state.p_Connected.set(E_Locations.l_4_12, E_Locations.l_4_13); state.p_Connected.set(E_Locations.l_8_14, E_Locations.l_9_14); state.p_Connected.set(E_Locations.l_5_5, E_Locations.l_5_6); state.p_Connected.set(E_Locations.l_7_5, E_Locations.l_7_4); state.p_Connected.set(E_Locations.l_5_10, E_Locations.l_4_10); 
state.p_Connected.set(E_Locations.l_13_15, E_Locations.l_12_15); state.p_Connected.set(E_Locations.l_10_8, E_Locations.l_9_8); state.p_Connected.set(E_Locations.l_2_2, E_Locations.l_2_3); state.p_Connected.set(E_Locations.l_11_10, E_Locations.l_11_9); state.p_Connected.set(E_Locations.l_10_11, E_Locations.l_10_12); state.p_Connected.set(E_Locations.l_4_1, E_Locations.l_4_2); state.p_Connected.set(E_Locations.l_11_6, E_Locations.l_10_6); state.p_Connected.set(E_Locations.l_1_9, E_Locations.l_2_9); state.p_Connected.set(E_Locations.l_5_7, E_Locations.l_5_8); state.p_Connected.set(E_Locations.l_6_7, E_Locations.l_6_6); state.p_Connected.set(E_Locations.l_2_9, E_Locations.l_2_10); state.p_Connected.set(E_Locations.l_10_1, E_Locations.l_9_1); state.p_Connected.set(E_Locations.l_9_10, E_Locations.l_8_10); state.p_Connected.set(E_Locations.l_12_11, E_Locations.l_11_11); state.p_Connected.set(E_Locations.l_4_8, E_Locations.l_4_7); state.p_Connected.set(E_Locations.l_7_6, E_Locations.l_8_6); state.p_Connected.set(E_Locations.l_4_3, E_Locations.l_4_4); state.p_Connected.set(E_Locations.l_5_12, E_Locations.l_6_12); state.p_Connected.set(E_Locations.l_11_6, E_Locations.l_12_6); state.p_Connected.set(E_Locations.l_15_2, E_Locations.l_14_2); state.p_Connected.set(E_Locations.l_5_4, E_Locations.l_6_4); state.p_Connected.set(E_Locations.l_15_11, E_Locations.l_16_11); state.p_Connected.set(E_Locations.l_10_16, E_Locations.l_9_16); state.p_Connected.set(E_Locations.l_1_14, E_Locations.l_2_14); state.p_Connected.set(E_Locations.l_12_9, E_Locations.l_12_10); state.p_Connected.set(E_Locations.l_14_5, E_Locations.l_14_6); state.p_Connected.set(E_Locations.l_15_2, E_Locations.l_16_2); state.p_Connected.set(E_Locations.l_13_7, E_Locations.l_13_8); state.p_Connected.set(E_Locations.l_13_9, E_Locations.l_14_9); state.p_Connected.set(E_Locations.l_7_11, E_Locations.l_6_11); state.p_Connected.set(E_Locations.l_14_12, E_Locations.l_14_11); state.p_Connected.set(E_Locations.l_9_5, 
E_Locations.l_8_5); state.p_Connected.set(E_Locations.l_16_3, E_Locations.l_16_4); state.p_Connected.set(E_Locations.l_7_5, E_Locations.l_6_5); state.p_Connected.set(E_Locations.l_7_16, E_Locations.l_6_16); state.p_Connected.set(E_Locations.l_1_15, E_Locations.l_1_16); state.p_Connected.set(E_Locations.l_2_9, E_Locations.l_2_8); state.p_Connected.set(E_Locations.l_8_6, E_Locations.l_8_7); state.p_Connected.set(E_Locations.l_13_7, E_Locations.l_13_6); state.p_Connected.set(E_Locations.l_16_8, E_Locations.l_16_9); state.p_Connected.set(E_Locations.l_2_7, E_Locations.l_1_7); state.p_Connected.set(E_Locations.l_4_14, E_Locations.l_5_14); state.p_Connected.set(E_Locations.l_6_13, E_Locations.l_5_13); state.p_Connected.set(E_Locations.l_4_12, E_Locations.l_4_11); state.p_Connected.set(E_Locations.l_12_6, E_Locations.l_11_6); state.p_Connected.set(E_Locations.l_9_11, E_Locations.l_10_11); state.p_Connected.set(E_Locations.l_6_1, E_Locations.l_7_1); state.p_Connected.set(E_Locations.l_5_6, E_Locations.l_5_5); state.p_Connected.set(E_Locations.l_13_14, E_Locations.l_13_15); state.p_Connected.set(E_Locations.l_6_2, E_Locations.l_6_3); state.p_Connected.set(E_Locations.l_6_7, E_Locations.l_6_8); state.p_Connected.set(E_Locations.l_7_12, E_Locations.l_6_12); state.p_Connected.set(E_Locations.l_4_3, E_Locations.l_5_3); state.p_Connected.set(E_Locations.l_2_12, E_Locations.l_2_11); state.p_Connected.set(E_Locations.l_6_4, E_Locations.l_6_5); state.p_Connected.set(E_Locations.l_12_5, E_Locations.l_12_4); state.p_Connected.set(E_Locations.l_14_1, E_Locations.l_14_2); state.p_Connected.set(E_Locations.l_16_6, E_Locations.l_16_7); state.p_Connected.set(E_Locations.l_14_7, E_Locations.l_14_8); state.p_Connected.set(E_Locations.l_10_11, E_Locations.l_11_11); state.p_Connected.set(E_Locations.l_3_12, E_Locations.l_2_12); state.p_Connected.set(E_Locations.l_2_1, E_Locations.l_3_1); state.p_Connected.set(E_Locations.l_7_2, E_Locations.l_8_2); state.p_Connected.set(E_Locations.l_9_6, 
E_Locations.l_9_5); state.p_Connected.set(E_Locations.l_9_8, E_Locations.l_9_7); state.p_Connected.set(E_Locations.l_1_4, E_Locations.l_2_4); state.p_Connected.set(E_Locations.l_16_16, E_Locations.l_15_16); state.p_Connected.set(E_Locations.l_15_4, E_Locations.l_15_3); state.p_Connected.set(E_Locations.l_16_8, E_Locations.l_16_7); state.p_Connected.set(E_Locations.l_13_4, E_Locations.l_14_4); state.p_Connected.set(E_Locations.l_13_11, E_Locations.l_12_11); state.p_Connected.set(E_Locations.l_4_7, E_Locations.l_5_7); state.p_Connected.set(E_Locations.l_8_8, E_Locations.l_8_9); state.p_Connected.set(E_Locations.l_13_9, E_Locations.l_12_9); state.p_Connected.set(E_Locations.l_13_15, E_Locations.l_13_16); state.p_Connected.set(E_Locations.l_8_11, E_Locations.l_9_11); state.p_Connected.set(E_Locations.l_13_4, E_Locations.l_13_3); state.p_Connected.set(E_Locations.l_13_16, E_Locations.l_12_16); state.p_Connected.set(E_Locations.l_3_11, E_Locations.l_4_11); state.p_Connected.set(E_Locations.l_12_7, E_Locations.l_11_7); state.p_Connected.set(E_Locations.l_10_13, E_Locations.l_9_13); state.p_Connected.set(E_Locations.l_2_4, E_Locations.l_2_3); state.p_Connected.set(E_Locations.l_14_13, E_Locations.l_14_12); state.p_Connected.set(E_Locations.l_10_15, E_Locations.l_10_16); state.p_Connected.set(E_Locations.l_13_12, E_Locations.l_12_12); state.p_Connected.set(E_Locations.l_15_10, E_Locations.l_15_11); state.p_Connected.set(E_Locations.l_7_3, E_Locations.l_8_3); state.p_Connected.set(E_Locations.l_8_1, E_Locations.l_8_2); state.p_Connected.set(E_Locations.l_11_6, E_Locations.l_11_5); state.p_Connected.set(E_Locations.l_14_8, E_Locations.l_14_9); state.p_Connected.set(E_Locations.l_3_9, E_Locations.l_4_9); state.p_Connected.set(E_Locations.l_14_8, E_Locations.l_13_8); state.p_Connected.set(E_Locations.l_10_5, E_Locations.l_11_5); state.p_Connected.set(E_Locations.l_10_4, E_Locations.l_9_4); state.p_Connected.set(E_Locations.l_6_10, E_Locations.l_6_9); 
state.p_Connected.set(E_Locations.l_3_11, E_Locations.l_3_12); state.p_Connected.set(E_Locations.l_6_5, E_Locations.l_7_5); state.p_Connected.set(E_Locations.l_7_10, E_Locations.l_6_10); state.p_Connected.set(E_Locations.l_5_16, E_Locations.l_6_16); state.p_Connected.set(E_Locations.l_1_2, E_Locations.l_2_2); state.p_Connected.set(E_Locations.l_5_2, E_Locations.l_5_3); state.p_Connected.set(E_Locations.l_14_12, E_Locations.l_13_12); state.p_Connected.set(E_Locations.l_3_16, E_Locations.l_3_15); state.p_Connected.set(E_Locations.l_14_16, E_Locations.l_13_16); state.p_Connected.set(E_Locations.l_11_16, E_Locations.l_12_16); state.p_Connected.set(E_Locations.l_10_10, E_Locations.l_11_10); state.p_Connected.set(E_Locations.l_10_12, E_Locations.l_9_12); state.p_Connected.set(E_Locations.l_7_12, E_Locations.l_8_12); state.p_Connected.set(E_Locations.l_7_10, E_Locations.l_8_10); state.p_Connected.set(E_Locations.l_3_6, E_Locations.l_3_5); state.p_Connected.set(E_Locations.l_3_3, E_Locations.l_3_4); state.p_Connected.set(E_Locations.l_10_4, E_Locations.l_10_5); state.p_Connected.set(E_Locations.l_16_2, E_Locations.l_16_3); state.p_Connected.set(E_Locations.l_16_12, E_Locations.l_16_13); state.p_Connected.set(E_Locations.l_10_5, E_Locations.l_10_4); state.p_Connected.set(E_Locations.l_8_16, E_Locations.l_8_15); state.p_Connected.set(E_Locations.l_13_6, E_Locations.l_13_7); state.p_Connected.set(E_Locations.l_4_15, E_Locations.l_5_15); state.p_Connected.set(E_Locations.l_9_4, E_Locations.l_9_3); state.p_Connected.set(E_Locations.l_15_7, E_Locations.l_14_7); state.p_Connected.set(E_Locations.l_12_8, E_Locations.l_12_9); state.p_Connected.set(E_Locations.l_4_10, E_Locations.l_5_10); state.p_Connected.set(E_Locations.l_5_1, E_Locations.l_4_1); state.p_Connected.set(E_Locations.l_8_2, E_Locations.l_7_2); state.p_Connected.set(E_Locations.l_13_4, E_Locations.l_12_4); state.p_Connected.set(E_Locations.l_11_4, E_Locations.l_12_4); state.p_Connected.set(E_Locations.l_10_16, 
E_Locations.l_11_16); state.p_Connected.set(E_Locations.l_2_3, E_Locations.l_3_3); state.p_Connected.set(E_Locations.l_10_12, E_Locations.l_10_13); state.p_Connected.set(E_Locations.l_5_8, E_Locations.l_6_8); state.p_Connected.set(E_Locations.l_10_3, E_Locations.l_10_4); state.p_Connected.set(E_Locations.l_13_6, E_Locations.l_13_5); state.p_Connected.set(E_Locations.l_3_8, E_Locations.l_3_7); state.p_Connected.set(E_Locations.l_7_7, E_Locations.l_8_7); state.p_Connected.set(E_Locations.l_13_8, E_Locations.l_13_7); state.p_Connected.set(E_Locations.l_14_9, E_Locations.l_15_9); state.p_Connected.set(E_Locations.l_8_15, E_Locations.l_9_15); state.p_Connected.set(E_Locations.l_10_12, E_Locations.l_10_11); state.p_Connected.set(E_Locations.l_12_6, E_Locations.l_12_7); state.p_Connected.set(E_Locations.l_12_5, E_Locations.l_11_5); state.p_Connected.set(E_Locations.l_1_6, E_Locations.l_1_7); state.p_Connected.set(E_Locations.l_2_13, E_Locations.l_2_14); state.p_Connected.set(E_Locations.l_3_4, E_Locations.l_2_4); state.p_Connected.set(E_Locations.l_14_12, E_Locations.l_14_13); state.p_Connected.set(E_Locations.l_4_15, E_Locations.l_4_16); state.p_Connected.set(E_Locations.l_14_14, E_Locations.l_13_14); state.p_Connected.set(E_Locations.l_11_15, E_Locations.l_10_15); state.p_Connected.set(E_Locations.l_8_16, E_Locations.l_9_16); state.p_Connected.set(E_Locations.l_13_10, E_Locations.l_13_11); state.p_Connected.set(E_Locations.l_11_8, E_Locations.l_12_8); state.p_Connected.set(E_Locations.l_9_2, E_Locations.l_9_1); state.p_Connected.set(E_Locations.l_5_12, E_Locations.l_4_12); state.p_Connected.set(E_Locations.l_11_13, E_Locations.l_11_14); state.p_Connected.set(E_Locations.l_7_7, E_Locations.l_7_8); state.p_Connected.set(E_Locations.l_4_10, E_Locations.l_4_9); state.p_Connected.set(E_Locations.l_9_3, E_Locations.l_8_3); state.p_Connected.set(E_Locations.l_6_12, E_Locations.l_6_11); state.p_Connected.set(E_Locations.l_1_2, E_Locations.l_1_3); 
state.p_Connected.set(E_Locations.l_11_9, E_Locations.l_11_8); state.p_Connected.set(E_Locations.l_10_8, E_Locations.l_10_7); state.p_Connected.set(E_Locations.l_13_13, E_Locations.l_13_14); state.p_Connected.set(E_Locations.l_8_5, E_Locations.l_9_5); state.p_Connected.set(E_Locations.l_11_10, E_Locations.l_12_10); state.p_Connected.set(E_Locations.l_1_10, E_Locations.l_1_11); state.p_Connected.set(E_Locations.l_13_1, E_Locations.l_13_2); state.p_Connected.set(E_Locations.l_7_9, E_Locations.l_7_8); state.p_Connected.set(E_Locations.l_6_15, E_Locations.l_6_16); state.p_Connected.set(E_Locations.l_15_6, E_Locations.l_16_6); state.p_Connected.set(E_Locations.l_2_14, E_Locations.l_1_14); state.p_Connected.set(E_Locations.l_12_6, E_Locations.l_12_5); state.p_Connected.set(E_Locations.l_6_11, E_Locations.l_5_11); state.p_Connected.set(E_Locations.l_9_13, E_Locations.l_9_14); state.p_Connected.set(E_Locations.l_6_5, E_Locations.l_6_4); state.p_Connected.set(E_Locations.l_2_13, E_Locations.l_1_13); state.p_Connected.set(E_Locations.l_5_16, E_Locations.l_5_15); state.p_Connected.set(E_Locations.l_11_6, E_Locations.l_11_7); state.p_Connected.set(E_Locations.l_11_11, E_Locations.l_10_11); state.p_Connected.set(E_Locations.l_2_7, E_Locations.l_2_6); state.p_Connected.set(E_Locations.l_6_2, E_Locations.l_5_2); state.p_Connected.set(E_Locations.l_14_2, E_Locations.l_15_2); state.p_Connected.set(E_Locations.l_16_14, E_Locations.l_16_13); state.p_Connected.set(E_Locations.l_1_8, E_Locations.l_1_9); state.p_Connected.set(E_Locations.l_3_1, E_Locations.l_2_1); state.p_Connected.set(E_Locations.l_10_14, E_Locations.l_11_14); state.p_Connected.set(E_Locations.l_15_7, E_Locations.l_16_7); state.p_Connected.set(E_Locations.l_3_13, E_Locations.l_4_13); state.p_Connected.set(E_Locations.l_5_8, E_Locations.l_4_8); state.p_Connected.set(E_Locations.l_8_11, E_Locations.l_8_10); state.p_Connected.set(E_Locations.l_3_13, E_Locations.l_3_14); state.p_Connected.set(E_Locations.l_7_4, 
E_Locations.l_7_3); state.p_Connected.set(E_Locations.l_6_10, E_Locations.l_6_11); state.p_Connected.set(E_Locations.l_3_11, E_Locations.l_2_11); state.p_Connected.set(E_Locations.l_7_3, E_Locations.l_6_3); state.p_Connected.set(E_Locations.l_14_12, E_Locations.l_15_12); state.p_Connected.set(E_Locations.l_1_14, E_Locations.l_1_13); state.p_Connected.set(E_Locations.l_3_14, E_Locations.l_3_13); state.p_Connected.set(E_Locations.l_7_6, E_Locations.l_7_5); state.p_Connected.set(E_Locations.l_13_10, E_Locations.l_14_10); state.p_Connected.set(E_Locations.l_7_2, E_Locations.l_7_1); state.p_Connected.set(E_Locations.l_5_7, E_Locations.l_6_7); state.p_Connected.set(E_Locations.l_9_12, E_Locations.l_10_12); state.p_Connected.set(E_Locations.l_16_5, E_Locations.l_16_6); state.p_Connected.set(E_Locations.l_14_6, E_Locations.l_15_6); state.p_Connected.set(E_Locations.l_6_8, E_Locations.l_5_8); state.p_Connected.set(E_Locations.l_5_5, E_Locations.l_6_5); state.p_Connected.set(E_Locations.l_7_12, E_Locations.l_7_13); state.p_Connected.set(E_Locations.l_10_12, E_Locations.l_11_12); state.p_Connected.set(E_Locations.l_9_6, E_Locations.l_10_6); state.p_Connected.set(E_Locations.l_12_9, E_Locations.l_13_9); state.p_Connected.set(E_Locations.l_2_14, E_Locations.l_2_15); state.p_Connected.set(E_Locations.l_4_15, E_Locations.l_4_14); state.p_Connected.set(E_Locations.l_9_3, E_Locations.l_10_3); state.p_Connected.set(E_Locations.l_14_10, E_Locations.l_14_9); state.p_Connected.set(E_Locations.l_13_1, E_Locations.l_14_1); state.p_Connected.set(E_Locations.l_1_11, E_Locations.l_1_12); state.p_Connected.set(E_Locations.l_4_13, E_Locations.l_5_13); state.p_Connected.set(E_Locations.l_2_7, E_Locations.l_2_8); state.p_Connected.set(E_Locations.l_6_11, E_Locations.l_7_11); state.p_Connected.set(E_Locations.l_15_8, E_Locations.l_16_8); state.p_Connected.set(E_Locations.l_10_13, E_Locations.l_11_13); state.p_Connected.set(E_Locations.l_16_13, E_Locations.l_15_13); 
state.p_Connected.set(E_Locations.l_3_10, E_Locations.l_3_11); state.p_Connected.set(E_Locations.l_2_8, E_Locations.l_2_9); state.p_Connected.set(E_Locations.l_13_7, E_Locations.l_12_7); state.p_Connected.set(E_Locations.l_9_7, E_Locations.l_8_7); state.p_Connected.set(E_Locations.l_2_12, E_Locations.l_2_13); state.p_Connected.set(E_Locations.l_16_6, E_Locations.l_15_6); state.p_Connected.set(E_Locations.l_2_11, E_Locations.l_2_10); state.p_Connected.set(E_Locations.l_5_14, E_Locations.l_5_15); state.p_Connected.set(E_Locations.l_5_14, E_Locations.l_5_13); state.p_Connected.set(E_Locations.l_7_8, E_Locations.l_7_9); state.p_Connected.set(E_Locations.l_16_9, E_Locations.l_16_10); state.p_Connected.set(E_Locations.l_9_15, E_Locations.l_8_15); state.p_Connected.set(E_Locations.l_4_8, E_Locations.l_5_8); state.p_Connected.set(E_Locations.l_5_6, E_Locations.l_4_6); state.p_Connected.set(E_Locations.l_2_5, E_Locations.l_1_5); state.p_Connected.set(E_Locations.l_13_2, E_Locations.l_14_2); state.p_Connected.set(E_Locations.l_15_11, E_Locations.l_14_11); state.p_Connected.set(E_Locations.l_15_6, E_Locations.l_15_5); state.p_Connected.set(E_Locations.l_11_12, E_Locations.l_12_12); state.p_Connected.set(E_Locations.l_15_11, E_Locations.l_15_10); state.p_Connected.set(E_Locations.l_9_13, E_Locations.l_9_12); state.p_Connected.set(E_Locations.l_15_1, E_Locations.l_16_1); state.p_Connected.set(E_Locations.l_13_12, E_Locations.l_13_11); state.p_Connected.set(E_Locations.l_16_14, E_Locations.l_15_14); state.p_Connected.set(E_Locations.l_10_9, E_Locations.l_11_9); state.p_Connected.set(E_Locations.l_12_10, E_Locations.l_13_10); state.p_Connected.set(E_Locations.l_2_1, E_Locations.l_2_2); state.p_Connected.set(E_Locations.l_2_15, E_Locations.l_1_15); state.p_Connected.set(E_Locations.l_12_16, E_Locations.l_13_16); state.p_Connected.set(E_Locations.l_7_13, E_Locations.l_6_13); state.p_Connected.set(E_Locations.l_8_4, E_Locations.l_8_5); state.p_Connected.set(E_Locations.l_4_9, 
E_Locations.l_4_10); state.p_Connected.set(E_Locations.l_10_7, E_Locations.l_11_7); state.p_Connected.set(E_Locations.l_15_5, E_Locations.l_16_5); state.p_Connected.set(E_Locations.l_3_6, E_Locations.l_2_6); state.p_Connected.set(E_Locations.l_6_8, E_Locations.l_7_8); state.p_Connected.set(E_Locations.l_11_12, E_Locations.l_10_12); state.p_Connected.set(E_Locations.l_11_8, E_Locations.l_10_8); state.p_Connected.set(E_Locations.l_15_7, E_Locations.l_15_6); state.p_Connected.set(E_Locations.l_4_1, E_Locations.l_3_1); state.p_Connected.set(E_Locations.l_6_5, E_Locations.l_5_5); state.p_Connected.set(E_Locations.l_1_2, E_Locations.l_1_1); state.p_Connected.set(E_Locations.l_5_1, E_Locations.l_5_2); state.p_Connected.set(E_Locations.l_8_7, E_Locations.l_8_8); state.p_Connected.set(E_Locations.l_16_3, E_Locations.l_16_2); state.p_Connected.set(E_Locations.l_9_1, E_Locations.l_10_1); state.p_Connected.set(E_Locations.l_10_6, E_Locations.l_11_6); state.p_Connected.set(E_Locations.l_15_11, E_Locations.l_15_12); state.p_Connected.set(E_Locations.l_1_12, E_Locations.l_1_13); state.p_Connected.set(E_Locations.l_3_9, E_Locations.l_3_10); state.p_Connected.set(E_Locations.l_10_2, E_Locations.l_11_2); state.p_Connected.set(E_Locations.l_11_4, E_Locations.l_11_5); state.p_Connected.set(E_Locations.l_1_4, E_Locations.l_1_3); state.p_Connected.set(E_Locations.l_10_11, E_Locations.l_9_11); state.p_Connected.set(E_Locations.l_10_14, E_Locations.l_10_15); state.p_Connected.set(E_Locations.l_11_12, E_Locations.l_11_13); state.p_Connected.set(E_Locations.l_9_15, E_Locations.l_9_14); state.p_Connected.set(E_Locations.l_13_5, E_Locations.l_14_5); state.p_Connected.set(E_Locations.l_11_14, E_Locations.l_11_13); state.p_Connected.set(E_Locations.l_1_1, E_Locations.l_1_2); state.p_Connected.set(E_Locations.l_7_8, E_Locations.l_7_7); state.p_Connected.set(E_Locations.l_12_12, E_Locations.l_12_11); state.p_Connected.set(E_Locations.l_15_8, E_Locations.l_15_7); 
state.p_Connected.set(E_Locations.l_13_14, E_Locations.l_12_14); state.p_Connected.set(E_Locations.l_3_12, E_Locations.l_4_12); state.p_Connected.set(E_Locations.l_2_6, E_Locations.l_3_6); state.p_Connected.set(E_Locations.l_15_4, E_Locations.l_14_4); state.p_Connected.set(E_Locations.l_5_12, E_Locations.l_5_11); state.p_Connected.set(E_Locations.l_15_15, E_Locations.l_16_15); state.p_Connected.set(E_Locations.l_15_16, E_Locations.l_16_16); state.p_Connected.set(E_Locations.l_13_1, E_Locations.l_12_1); state.p_Connected.set(E_Locations.l_1_13, E_Locations.l_2_13); state.p_Connected.set(E_Locations.l_5_15, E_Locations.l_5_14); state.p_Connected.set(E_Locations.l_11_9, E_Locations.l_10_9); state.p_Connected.set(E_Locations.l_13_15, E_Locations.l_14_15); state.p_Connected.set(E_Locations.l_7_5, E_Locations.l_8_5); state.p_Connected.set(E_Locations.l_9_8, E_Locations.l_8_8); state.p_Connected.set(E_Locations.l_7_16, E_Locations.l_7_15); state.p_Connected.set(E_Locations.l_15_4, E_Locations.l_15_5); state.p_Connected.set(E_Locations.l_9_11, E_Locations.l_9_10); state.p_Connected.set(E_Locations.l_9_3, E_Locations.l_9_2); state.p_Connected.set(E_Locations.l_11_7, E_Locations.l_10_7); state.p_Connected.set(E_Locations.l_10_6, E_Locations.l_9_6); state.p_Connected.set(E_Locations.l_7_13, E_Locations.l_7_12); state.p_Connected.set(E_Locations.l_14_1, E_Locations.l_13_1); state.p_Connected.set(E_Locations.l_10_14, E_Locations.l_9_14); state.p_Connected.set(E_Locations.l_4_4, E_Locations.l_3_4); state.p_Connected.set(E_Locations.l_13_10, E_Locations.l_13_9); state.p_Connected.set(E_Locations.l_14_7, E_Locations.l_14_6); state.p_Connected.set(E_Locations.l_15_3, E_Locations.l_14_3); state.p_Connected.set(E_Locations.l_6_16, E_Locations.l_6_15); state.p_Connected.set(E_Locations.l_2_9, E_Locations.l_3_9); state.p_Connected.set(E_Locations.l_6_1, E_Locations.l_6_2); state.p_Connected.set(E_Locations.l_16_13, E_Locations.l_16_14); state.p_Connected.set(E_Locations.l_15_8, 
E_Locations.l_14_8); state.p_Connected.set(E_Locations.l_1_12, E_Locations.l_2_12); state.p_Connected.set(E_Locations.l_8_9, E_Locations.l_8_10); state.p_Connected.set(E_Locations.l_15_9, E_Locations.l_14_9); state.p_Connected.set(E_Locations.l_14_14, E_Locations.l_15_14); state.p_Connected.set(E_Locations.l_15_14, E_Locations.l_14_14); state.p_Connected.set(E_Locations.l_4_4, E_Locations.l_4_3); state.p_Connected.set(E_Locations.l_15_10, E_Locations.l_16_10); state.p_Connected.set(E_Locations.l_13_15, E_Locations.l_13_14); state.p_Connected.set(E_Locations.l_3_10, E_Locations.l_4_10); state.p_Connected.set(E_Locations.l_11_2, E_Locations.l_11_3); state.p_Connected.set(E_Locations.l_15_16, E_Locations.l_15_15); state.p_Connected.set(E_Locations.l_7_14, E_Locations.l_7_13); state.p_Connected.set(E_Locations.l_14_2, E_Locations.l_13_2); state.p_Connected.set(E_Locations.l_15_12, E_Locations.l_15_13); state.p_Connected.set(E_Locations.l_6_14, E_Locations.l_6_15); state.p_Connected.set(E_Locations.l_3_4, E_Locations.l_3_5); state.p_Connected.set(E_Locations.l_11_5, E_Locations.l_10_5); state.p_Connected.set(E_Locations.l_14_13, E_Locations.l_13_13); state.p_Connected.set(E_Locations.l_12_12, E_Locations.l_12_13); state.p_Connected.set(E_Locations.l_6_7, E_Locations.l_7_7); state.p_Connected.set(E_Locations.l_1_11, E_Locations.l_1_10); state.p_Connected.set(E_Locations.l_8_15, E_Locations.l_8_16); state.p_Connected.set(E_Locations.l_11_11, E_Locations.l_11_10); state.p_Connected.set(E_Locations.l_1_15, E_Locations.l_2_15); state.p_Connected.set(E_Locations.l_9_9, E_Locations.l_10_9); state.p_Connected.set(E_Locations.l_8_10, E_Locations.l_8_9); state.p_Connected.set(E_Locations.l_1_4, E_Locations.l_1_5); state.p_Connected.set(E_Locations.l_5_4, E_Locations.l_5_5); state.p_Connected.set(E_Locations.l_4_14, E_Locations.l_3_14); state.p_Connected.set(E_Locations.l_10_5, E_Locations.l_10_6); state.p_Connected.set(E_Locations.l_1_1, E_Locations.l_2_1); 
state.p_Connected.set(E_Locations.l_13_2, E_Locations.l_13_3); state.p_Connected.set(E_Locations.l_7_4, E_Locations.l_8_4); state.p_Connected.set(E_Locations.l_15_14, E_Locations.l_15_15); state.p_Connected.set(E_Locations.l_3_5, E_Locations.l_3_6); state.p_Connected.set(E_Locations.l_16_11, E_Locations.l_15_11); state.p_Connected.set(E_Locations.l_12_2, E_Locations.l_11_2); state.p_Connected.set(E_Locations.l_5_10, E_Locations.l_5_9); state.p_Connected.set(E_Locations.l_6_12, E_Locations.l_5_12); state.p_Connected.set(E_Locations.l_6_6, E_Locations.l_6_7); state.p_Connected.set(E_Locations.l_10_8, E_Locations.l_10_9); state.p_Connected.set(E_Locations.l_3_14, E_Locations.l_4_14); state.p_Connected.set(E_Locations.l_13_10, E_Locations.l_12_10); state.p_Connected.set(E_Locations.l_6_8, E_Locations.l_6_9); state.p_Connected.set(E_Locations.l_4_11, E_Locations.l_4_10); state.p_Connected.set(E_Locations.l_4_8, E_Locations.l_4_9); state.p_Connected.set(E_Locations.l_6_10, E_Locations.l_5_10); state.p_Connected.set(E_Locations.l_11_5, E_Locations.l_11_6); state.p_Connected.set(E_Locations.l_15_6, E_Locations.l_14_6); state.p_Connected.set(E_Locations.l_2_2, E_Locations.l_2_1); state.p_Connected.set(E_Locations.l_3_2, E_Locations.l_3_1); state.p_Connected.set(E_Locations.l_4_7, E_Locations.l_4_8); state.p_Connected.set(E_Locations.l_16_1, E_Locations.l_16_2); state.p_Connected.set(E_Locations.l_2_10, E_Locations.l_3_10); state.p_Connected.set(E_Locations.l_8_12, E_Locations.l_8_11); state.p_Connected.set(E_Locations.l_5_14, E_Locations.l_6_14); state.p_Connected.set(E_Locations.l_6_14, E_Locations.l_6_13); state.p_Connected.set(E_Locations.l_14_14, E_Locations.l_14_15); state.p_Connected.set(E_Locations.l_8_11, E_Locations.l_8_12); state.p_Connected.set(E_Locations.l_13_16, E_Locations.l_14_16); state.p_Connected.set(E_Locations.l_9_15, E_Locations.l_9_16); state.p_Connected.set(E_Locations.l_1_6, E_Locations.l_1_5); state.p_Connected.set(E_Locations.l_13_2, 
E_Locations.l_12_2); state.p_Connected.set(E_Locations.l_14_11, E_Locations.l_14_10); state.p_Connected.set(E_Locations.l_10_11, E_Locations.l_10_10); state.p_Connected.set(E_Locations.l_3_9, E_Locations.l_2_9); state.p_Connected.set(E_Locations.l_2_5, E_Locations.l_2_6); state.p_Connected.set(E_Locations.l_12_3, E_Locations.l_11_3); state.p_Connected.set(E_Locations.l_12_11, E_Locations.l_13_11); state.p_Connected.set(E_Locations.l_6_9, E_Locations.l_6_10); state.p_Connected.set(E_Locations.l_9_9, E_Locations.l_9_10); state.p_Connected.set(E_Locations.l_10_6, E_Locations.l_10_5); state.p_Connected.set(E_Locations.l_2_11, E_Locations.l_3_11); state.p_Connected.set(E_Locations.l_7_2, E_Locations.l_6_2); state.p_Connected.set(E_Locations.l_13_3, E_Locations.l_13_4); state.p_Connected.set(E_Locations.l_6_9, E_Locations.l_7_9); state.p_Connected.set(E_Locations.l_9_5, E_Locations.l_9_6); state.p_Connected.set(E_Locations.l_16_3, E_Locations.l_15_3); state.p_Connected.set(E_Locations.l_5_13, E_Locations.l_5_14); state.p_Connected.set(E_Locations.l_2_2, E_Locations.l_3_2); state.p_Connected.set(E_Locations.l_6_2, E_Locations.l_6_1); state.p_Connected.set(E_Locations.l_10_4, E_Locations.l_11_4); state.p_Connected.set(E_Locations.l_8_12, E_Locations.l_9_12); state.p_Connected.set(E_Locations.l_11_2, E_Locations.l_10_2); state.p_Connected.set(E_Locations.l_8_10, E_Locations.l_8_11); state.p_Connected.set(E_Locations.l_6_12, E_Locations.l_7_12); state.p_Connected.set(E_Locations.l_14_10, E_Locations.l_14_11); state.p_Connected.set(E_Locations.l_13_14, E_Locations.l_14_14); state.p_Connected.set(E_Locations.l_3_3, E_Locations.l_2_3); state.p_Connected.set(E_Locations.l_11_9, E_Locations.l_12_9); state.p_Connected.set(E_Locations.l_5_2, E_Locations.l_4_2); state.p_Connected.set(E_Locations.l_11_13, E_Locations.l_12_13); state.p_Connected.set(E_Locations.l_12_10, E_Locations.l_11_10); state.p_Connected.set(E_Locations.l_8_9, E_Locations.l_8_8); 
state.p_Connected.set(E_Locations.l_8_5, E_Locations.l_7_5); state.p_Connected.set(E_Locations.l_8_6, E_Locations.l_8_5); state.p_Connected.set(E_Locations.l_2_12, E_Locations.l_1_12); state.p_Connected.set(E_Locations.l_6_16, E_Locations.l_5_16); state.p_Connected.set(E_Locations.l_4_2, E_Locations.l_4_3); state.p_Connected.set(E_Locations.l_3_3, E_Locations.l_3_2); state.p_Connected.set(E_Locations.l_5_3, E_Locations.l_4_3); state.p_Connected.set(E_Locations.l_16_13, E_Locations.l_16_12); state.p_Connected.set(E_Locations.l_12_6, E_Locations.l_13_6); state.p_Connected.set(E_Locations.l_15_14, E_Locations.l_15_13); state.p_Connected.set(E_Locations.l_8_7, E_Locations.l_7_7); state.p_Connected.set(E_Locations.l_4_5, E_Locations.l_4_6); state.p_Connected.set(E_Locations.l_16_2, E_Locations.l_15_2); state.p_Connected.set(E_Locations.l_9_3, E_Locations.l_9_4); state.p_Connected.set(E_Locations.l_3_3, E_Locations.l_4_3); state.p_Connected.set(E_Locations.l_12_2, E_Locations.l_13_2); state.p_Connected.set(E_Locations.l_7_14, E_Locations.l_8_14); state.p_Connected.set(E_Locations.l_9_2, E_Locations.l_8_2); state.p_Connected.set(E_Locations.l_11_3, E_Locations.l_11_4); state.p_Connected.set(E_Locations.l_16_9, E_Locations.l_16_8); state.p_Connected.set(E_Locations.l_4_13, E_Locations.l_4_14); state.p_Connected.set(E_Locations.l_5_5, E_Locations.l_4_5); state.p_Connected.set(E_Locations.l_2_3, E_Locations.l_1_3); state.p_Connected.set(E_Locations.l_14_3, E_Locations.l_14_2); state.p_Connected.set(E_Locations.l_12_9, E_Locations.l_12_8); state.p_Connected.set(E_Locations.l_5_4, E_Locations.l_4_4); state.p_Connected.set(E_Locations.l_4_14, E_Locations.l_4_15); state.p_Connected.set(E_Locations.l_4_3, E_Locations.l_3_3); state.p_Connected.set(E_Locations.l_14_6, E_Locations.l_13_6); state.p_Connected.set(E_Locations.l_6_15, E_Locations.l_5_15); state.p_Connected.set(E_Locations.l_12_7, E_Locations.l_13_7); state.p_Connected.set(E_Locations.l_3_15, E_Locations.l_4_15); 
state.p_Connected.set(E_Locations.l_4_15, E_Locations.l_3_15); state.p_Connected.set(E_Locations.l_11_13, E_Locations.l_11_12); state.p_Connected.set(E_Locations.l_2_16, E_Locations.l_2_15); state.p_Connected.set(E_Locations.l_8_15, E_Locations.l_8_14); state.p_Connected.set(E_Locations.l_16_5, E_Locations.l_15_5); state.p_Connected.set(E_Locations.l_6_16, E_Locations.l_7_16); state.p_Connected.set(E_Locations.l_14_2, E_Locations.l_14_1); state.p_Connected.set(E_Locations.l_8_4, E_Locations.l_9_4); state.p_Connected.set(E_Locations.l_16_11, E_Locations.l_16_12); state.p_Connected.set(E_Locations.l_13_5, E_Locations.l_12_5); state.p_Connected.set(E_Locations.l_2_10, E_Locations.l_2_9); state.p_Connected.set(E_Locations.l_2_6, E_Locations.l_2_5); state.p_Connected.set(E_Locations.l_6_3, E_Locations.l_5_3); state.p_Connected.set(E_Locations.l_10_7, E_Locations.l_10_8); state.p_Connected.set(E_Locations.l_8_14, E_Locations.l_8_15); state.p_Connected.set(E_Locations.l_14_15, E_Locations.l_15_15); state.p_Connected.set(E_Locations.l_12_1, E_Locations.l_11_1); state.p_Connected.set(E_Locations.l_12_16, E_Locations.l_11_16); state.p_Connected.set(E_Locations.l_11_4, E_Locations.l_11_3); state.p_Connected.set(E_Locations.l_15_4, E_Locations.l_16_4); state.p_Connected.set(E_Locations.l_1_8, E_Locations.l_2_8); state.p_Connected.set(E_Locations.l_5_8, E_Locations.l_5_7); state.p_Connected.set(E_Locations.l_4_9, E_Locations.l_5_9); state.p_Connected.set(E_Locations.l_15_10, E_Locations.l_14_10); state.p_Connected.set(E_Locations.l_3_16, E_Locations.l_2_16); state.p_Connected.set(E_Locations.l_7_6, E_Locations.l_7_7); state.p_Connected.set(E_Locations.l_14_14, E_Locations.l_14_13); state.p_Connected.set(E_Locations.l_14_13, E_Locations.l_14_14); state.p_Connected.set(E_Locations.l_7_9, E_Locations.l_6_9); state.p_Connected.set(E_Locations.l_10_10, E_Locations.l_10_11); state.p_Connected.set(E_Locations.l_11_2, E_Locations.l_11_1); state.p_Connected.set(E_Locations.l_9_4, 
E_Locations.l_10_4); state.p_Connected.set(E_Locations.l_5_9, E_Locations.l_4_9); state.p_Connected.set(E_Locations.l_2_8, E_Locations.l_1_8); state.p_Connected.set(E_Locations.l_10_10, E_Locations.l_9_10); state.p_Connected.set(E_Locations.l_3_7, E_Locations.l_4_7); state.p_Connected.set(E_Locations.l_3_8, E_Locations.l_3_9); state.p_Connected.set(E_Locations.l_7_13, E_Locations.l_7_14); state.p_Connected.set(E_Locations.l_3_5, E_Locations.l_2_5); state.p_Connected.set(E_Locations.l_14_16, E_Locations.l_15_16); state.p_Connected.set(E_Locations.l_3_13, E_Locations.l_3_12); state.p_Connected.set(E_Locations.l_14_9, E_Locations.l_14_10); state.p_Connected.set(E_Locations.l_7_3, E_Locations.l_7_2); state.p_Connected.set(E_Locations.l_9_13, E_Locations.l_8_13); state.p_Connected.set(E_Locations.l_6_1, E_Locations.l_5_1); state.p_Connected.set(E_Locations.l_9_9, E_Locations.l_9_8); state.p_Connected.set(E_Locations.l_8_2, E_Locations.l_8_3); state.p_Connected.set(E_Locations.l_9_7, E_Locations.l_10_7); state.p_Connected.set(E_Locations.l_4_12, E_Locations.l_5_12); state.p_Connected.set(E_Locations.l_8_15, E_Locations.l_7_15); state.p_Connected.set(E_Locations.l_3_1, E_Locations.l_4_1); state.p_Connected.set(E_Locations.l_7_3, E_Locations.l_7_4); state.p_Connected.set(E_Locations.l_11_1, E_Locations.l_12_1); state.p_Connected.set(E_Locations.l_11_14, E_Locations.l_10_14); state.p_Connected.set(E_Locations.l_5_11, E_Locations.l_5_12); state.p_Connected.set(E_Locations.l_1_15, E_Locations.l_1_14); state.p_Connected.set(E_Locations.l_11_16, E_Locations.l_10_16); state.p_Connected.set(E_Locations.l_6_11, E_Locations.l_6_12); state.p_Connected.set(E_Locations.l_15_3, E_Locations.l_16_3); state.p_Connected.set(E_Locations.l_11_7, E_Locations.l_11_6); state.p_Connected.set(E_Locations.l_4_1, E_Locations.l_5_1); state.p_Connected.set(E_Locations.l_12_3, E_Locations.l_13_3); state.p_Connected.set(E_Locations.l_5_15, E_Locations.l_4_15); state.p_Connected.set(E_Locations.l_15_2, 
E_Locations.l_15_1); state.p_Connected.set(E_Locations.l_1_5, E_Locations.l_2_5); state.p_Connected.set(E_Locations.l_13_7, E_Locations.l_14_7); state.p_Connected.set(E_Locations.l_6_9, E_Locations.l_5_9); state.p_Connected.set(E_Locations.l_7_4, E_Locations.l_6_4); state.p_Connected.set(E_Locations.l_4_3, E_Locations.l_4_2); state.p_Connected.set(E_Locations.l_4_11, E_Locations.l_4_12); state.p_Connected.set(E_Locations.l_14_4, E_Locations.l_14_5); state.p_Connected.set(E_Locations.l_1_3, E_Locations.l_2_3); state.p_Connected.set(E_Locations.l_12_8, E_Locations.l_11_8); state.p_Connected.set(E_Locations.l_5_14, E_Locations.l_4_14); state.p_Connected.set(E_Locations.l_3_12, E_Locations.l_3_11); state.p_Connected.set(E_Locations.l_13_12, E_Locations.l_14_12); state.p_Connected.set(E_Locations.l_3_13, E_Locations.l_2_13); state.p_Connected.set(E_Locations.l_5_10, E_Locations.l_5_11); state.p_Connected.set(E_Locations.l_12_15, E_Locations.l_12_14); state.p_Connected.set(E_Locations.l_14_5, E_Locations.l_14_4); state.p_Connected.set(E_Locations.l_13_4, E_Locations.l_13_5); state.p_Connected.set(E_Locations.l_4_16, E_Locations.l_4_15); state.p_Connected.set(E_Locations.l_2_11, E_Locations.l_1_11); state.p_Connected.set(E_Locations.l_14_3, E_Locations.l_14_4); state.p_Connected.set(E_Locations.l_7_14, E_Locations.l_7_15); state.p_Connected.set(E_Locations.l_11_11, E_Locations.l_12_11); state.p_Connected.set(E_Locations.l_4_6, E_Locations.l_3_6); state.p_Connected.set(E_Locations.l_14_3, E_Locations.l_13_3); state.p_Connected.set(E_Locations.l_8_6, E_Locations.l_9_6); state.p_Connected.set(E_Locations.l_2_3, E_Locations.l_2_4); state.p_Connected.set(E_Locations.l_2_4, E_Locations.l_1_4); state.p_Connected.set(E_Locations.l_10_1, E_Locations.l_10_2); state.p_Connected.set(E_Locations.l_10_3, E_Locations.l_11_3); state.p_Connected.set(E_Locations.l_14_1, E_Locations.l_15_1); state.p_Connected.set(E_Locations.l_14_9, E_Locations.l_14_8); 
state.p_Connected.set(E_Locations.l_16_10, E_Locations.l_15_10); state.p_Connected.set(E_Locations.l_9_10, E_Locations.l_9_9); state.p_Connected.set(E_Locations.l_16_11, E_Locations.l_16_10); state.p_Connected.set(E_Locations.l_5_3, E_Locations.l_5_2); state.p_Connected.set(E_Locations.l_10_9, E_Locations.l_10_8); state.p_Connected.set(E_Locations.l_13_14, E_Locations.l_13_13); state.p_Connected.set(E_Locations.l_7_7, E_Locations.l_6_7); state.p_Connected.set(E_Locations.l_6_12, E_Locations.l_6_13); state.p_Connected.set(E_Locations.l_1_3, E_Locations.l_1_2); state.p_Connected.set(E_Locations.l_16_15, E_Locations.l_15_15); state.p_Connected.set(E_Locations.l_5_11, E_Locations.l_5_10); state.p_Connected.set(E_Locations.l_2_12, E_Locations.l_3_12); state.p_Connected.set(E_Locations.l_1_7, E_Locations.l_2_7); state.p_Connected.set(E_Locations.l_2_14, E_Locations.l_3_14); state.p_Connected.set(E_Locations.l_11_3, E_Locations.l_11_2); state.p_Connected.set(E_Locations.l_6_14, E_Locations.l_7_14); state.p_Connected.set(E_Locations.l_9_16, E_Locations.l_9_15); state.p_Connected.set(E_Locations.l_7_2, E_Locations.l_7_3); state.p_Connected.set(E_Locations.l_11_7, E_Locations.l_12_7); state.p_Connected.set(E_Locations.l_10_15, E_Locations.l_9_15); state.p_Connected.set(E_Locations.l_7_9, E_Locations.l_7_10); state.p_Connected.set(E_Locations.l_8_9, E_Locations.l_9_9); state.p_Connected.set(E_Locations.l_5_6, E_Locations.l_6_6); state.p_Connected.set(E_Locations.l_12_8, E_Locations.l_13_8); state.p_Connected.set(E_Locations.l_7_15, E_Locations.l_8_15); state.p_Connected.set(E_Locations.l_10_9, E_Locations.l_9_9); state.p_Connected.set(E_Locations.l_9_14, E_Locations.l_9_13); state.p_Connected.set(E_Locations.l_11_1, E_Locations.l_10_1); state.p_Connected.set(E_Locations.l_6_4, E_Locations.l_5_4); state.p_Connected.set(E_Locations.l_9_6, E_Locations.l_8_6); state.p_Connected.set(E_Locations.l_14_4, E_Locations.l_14_3); state.p_Connected.set(E_Locations.l_11_14, 
E_Locations.l_11_15); state.p_Connected.set(E_Locations.l_6_5, E_Locations.l_6_6); state.p_Connected.set(E_Locations.l_7_11, E_Locations.l_8_11); state.p_Connected.set(E_Locations.l_3_9, E_Locations.l_3_8); state.p_Connected.set(E_Locations.l_15_10, E_Locations.l_15_9); state.p_Connected.set(E_Locations.l_11_7, E_Locations.l_11_8); state.p_Connected.set(E_Locations.l_4_2, E_Locations.l_4_1); state.p_Connected.set(E_Locations.l_8_2, E_Locations.l_9_2); state.p_Connected.set(E_Locations.l_12_14, E_Locations.l_13_14); state.p_Connected.set(E_Locations.l_7_13, E_Locations.l_8_13); state.p_Connected.set(E_Locations.l_11_1, E_Locations.l_11_2); state.p_Connected.set(E_Locations.l_4_2, E_Locations.l_3_2); state.p_Connected.set(E_Locations.l_16_2, E_Locations.l_16_1); state.p_Connected.set(E_Locations.l_8_6, E_Locations.l_7_6); state.p_Connected.set(E_Locations.l_2_15, E_Locations.l_3_15); state.p_Connected.set(E_Locations.l_10_10, E_Locations.l_10_9); state.p_Connected.set(E_Locations.l_2_4, E_Locations.l_2_5); state.p_Connected.set(E_Locations.l_1_12, E_Locations.l_1_11); state.p_Connected.set(E_Locations.l_3_4, E_Locations.l_4_4); state.p_Connected.set(E_Locations.l_8_12, E_Locations.l_8_13); state.p_Connected.set(E_Locations.l_7_5, E_Locations.l_7_6); state.p_Connected.set(E_Locations.l_15_15, E_Locations.l_14_15); state.p_Connected.set(E_Locations.l_8_7, E_Locations.l_9_7); state.p_Connected.set(E_Locations.l_2_9, E_Locations.l_1_9); state.p_Connected.set(E_Locations.l_16_10, E_Locations.l_16_9); state.p_Connected.set(E_Locations.l_13_3, E_Locations.l_12_3); state.p_Connected.set(E_Locations.l_16_15, E_Locations.l_16_14); state.p_Connected.set(E_Locations.l_11_8, E_Locations.l_11_9); state.p_Connected.set(E_Locations.l_10_4, E_Locations.l_10_3); state.p_Connected.set(E_Locations.l_9_2, E_Locations.l_9_3); state.p_Connected.set(E_Locations.l_4_4, E_Locations.l_4_5); state.p_Connected.set(E_Locations.l_8_4, E_Locations.l_7_4); state.p_Connected.set(E_Locations.l_12_2, 
E_Locations.l_12_3); state.p_Connected.set(E_Locations.l_2_2, E_Locations.l_1_2); state.p_Connected.set(E_Locations.l_3_11, E_Locations.l_3_10); state.p_Connected.set(E_Locations.l_16_7, E_Locations.l_15_7); state.p_Connected.set(E_Locations.l_7_8, E_Locations.l_6_8); state.p_Connected.set(E_Locations.l_2_11, E_Locations.l_2_12); state.p_Connected.set(E_Locations.l_4_7, E_Locations.l_3_7); state.p_Connected.set(E_Locations.l_15_9, E_Locations.l_16_9); state.p_Connected.set(E_Locations.l_8_3, E_Locations.l_7_3); state.p_Connected.set(E_Locations.l_3_16, E_Locations.l_4_16); state.p_Connected.set(E_Locations.l_8_4, E_Locations.l_8_3); state.p_Connected.set(E_Locations.l_5_2, E_Locations.l_6_2); state.p_Connected.set(E_Locations.l_8_8, E_Locations.l_9_8); state.p_Connected.set(E_Locations.l_15_2, E_Locations.l_15_3); state.p_Connected.set(E_Locations.l_1_13, E_Locations.l_1_12); state.p_Connected.set(E_Locations.l_13_9, E_Locations.l_13_8); state.p_Connected.set(E_Locations.l_2_4, E_Locations.l_3_4); state.p_Connected.set(E_Locations.l_14_6, E_Locations.l_14_5); state.p_Connected.set(E_Locations.l_12_12, E_Locations.l_11_12); state.p_Connected.set(E_Locations.l_7_1, E_Locations.l_7_2); state.p_Connected.set(E_Locations.l_9_10, E_Locations.l_9_11); state.p_Connected.set(E_Locations.l_9_7, E_Locations.l_9_6); state.p_Connected.set(E_Locations.l_15_13, E_Locations.l_15_14); state.p_Connected.set(E_Locations.l_16_6, E_Locations.l_16_5); state.p_Connected.set(E_Locations.l_15_15, E_Locations.l_15_16); state.p_Connected.set(E_Locations.l_1_9, E_Locations.l_1_10); state.p_Connected.set(E_Locations.l_1_10, E_Locations.l_1_9); state.p_Connected.set(E_Locations.l_11_15, E_Locations.l_11_16); state.p_Connected.set(E_Locations.l_2_5, E_Locations.l_3_5); state.p_Connected.set(E_Locations.l_12_4, E_Locations.l_13_4); state.p_Connected.set(E_Locations.l_14_10, E_Locations.l_13_10); state.p_Connected.set(E_Locations.l_1_7, E_Locations.l_1_8); state.p_Connected.set(E_Locations.l_12_4, 
E_Locations.l_11_4); state.p_Connected.set(E_Locations.l_4_6, E_Locations.l_4_5); state.p_Connected.set(E_Locations.l_7_15, E_Locations.l_7_16); state.p_Connected.set(E_Locations.l_2_1, E_Locations.l_1_1); state.p_Connected.set(E_Locations.l_6_3, E_Locations.l_7_3); state.p_Connected.set(E_Locations.l_2_6, E_Locations.l_1_6); state.p_Connected.set(E_Locations.l_6_2, E_Locations.l_7_2); state.p_Connected.set(E_Locations.l_15_13, E_Locations.l_16_13); state.p_Connected.set(E_Locations.l_5_3, E_Locations.l_5_4); state.p_Connected.set(E_Locations.l_2_14, E_Locations.l_2_13); state.p_Connected.set(E_Locations.l_5_12, E_Locations.l_5_13); state.p_Connected.set(E_Locations.l_9_5, E_Locations.l_9_4); state.p_Connected.set(E_Locations.l_6_15, E_Locations.l_7_15); state.p_Connected.set(E_Locations.l_14_2, E_Locations.l_14_3); state.p_Connected.set(E_Locations.l_4_4, E_Locations.l_5_4); state.p_Connected.set(E_Locations.l_8_9, E_Locations.l_7_9); state.p_Connected.set(E_Locations.l_9_11, E_Locations.l_9_12); state.p_Connected.set(E_Locations.l_9_8, E_Locations.l_9_9); state.p_Connected.set(E_Locations.l_7_15, E_Locations.l_6_15); state.p_Connected.set(E_Locations.l_3_6, E_Locations.l_4_6); state.p_Connected.set(E_Locations.l_2_16, E_Locations.l_1_16); state.p_Connected.set(E_Locations.l_15_1, E_Locations.l_15_2); state.p_Connected.set(E_Locations.l_10_7, E_Locations.l_9_7); state.p_Connected.set(E_Locations.l_11_2, E_Locations.l_12_2); state.p_Connected.set(E_Locations.l_8_2, E_Locations.l_8_1); state.p_Connected.set(E_Locations.l_9_1, E_Locations.l_9_2); state.p_Connected.set(E_Locations.l_3_5, E_Locations.l_3_4); state.p_Connected.set(E_Locations.l_7_10, E_Locations.l_7_9); state.p_Connected.set(E_Locations.l_15_5, E_Locations.l_15_6); state.p_Connected.set(E_Locations.l_7_6, E_Locations.l_6_6); state.p_Connected.set(E_Locations.l_15_12, E_Locations.l_15_11); state.p_Connected.set(E_Locations.l_8_13, E_Locations.l_7_13); state.p_Connected.set(E_Locations.l_1_16, 
E_Locations.l_2_16); state.p_Connected.set(E_Locations.l_2_8, E_Locations.l_3_8); state.p_Connected.set(E_Locations.l_3_2, E_Locations.l_3_3); state.p_Connected.set(E_Locations.l_5_6, E_Locations.l_5_7); state.p_Connected.set(E_Locations.l_15_15, E_Locations.l_15_14); state.p_Connected.set(E_Locations.l_13_2, E_Locations.l_13_1); state.p_Connected.set(E_Locations.l_7_11, E_Locations.l_7_12); state.p_Connected.set(E_Locations.l_12_7, E_Locations.l_12_6); state.p_Connected.set(E_Locations.l_12_13, E_Locations.l_13_13); state.p_Connected.set(E_Locations.l_10_2, E_Locations.l_10_3); state.p_Connected.set(E_Locations.l_5_8, E_Locations.l_5_9); state.p_Connected.set(E_Locations.l_3_15, E_Locations.l_2_15); state.p_Connected.set(E_Locations.l_10_13, E_Locations.l_10_14); state.p_Connected.set(E_Locations.l_5_9, E_Locations.l_6_9); state.p_Connected.set(E_Locations.l_8_3, E_Locations.l_8_2); state.p_Connected.set(E_Locations.l_11_10, E_Locations.l_11_11); state.p_Connected.set(E_Locations.l_6_13, E_Locations.l_7_13); state.p_Connected.set(E_Locations.l_11_16, E_Locations.l_11_15); state.p_Connected.set(E_Locations.l_5_9, E_Locations.l_5_8); state.p_Connected.set(E_Locations.l_12_15, E_Locations.l_11_15); state.p_Connected.set(E_Locations.l_1_5, E_Locations.l_1_6); state.p_Connected.set(E_Locations.l_1_10, E_Locations.l_2_10); state.p_Connected.set(E_Locations.l_10_2, E_Locations.l_9_2); state.p_Connected.set(E_Locations.l_12_16, E_Locations.l_12_15); state.p_Connected.set(E_Locations.l_13_11, E_Locations.l_13_10); state.p_Connected.set(E_Locations.l_10_9, E_Locations.l_10_10); state.p_Connected.set(E_Locations.l_5_7, E_Locations.l_4_7); state.p_Connected.set(E_Locations.l_9_12, E_Locations.l_9_11); state.p_Connected.set(E_Locations.l_16_7, E_Locations.l_16_8); state.p_Connected.set(E_Locations.l_9_16, E_Locations.l_10_16); state.p_Connected.set(E_Locations.l_14_10, E_Locations.l_15_10); state.p_Connected.set(E_Locations.l_3_10, E_Locations.l_2_10); 
state.p_Connected.set(E_Locations.l_12_10, E_Locations.l_12_11); state.p_Connected.set(E_Locations.l_11_5, E_Locations.l_11_4); state.p_Connected.set(E_Locations.l_13_13, E_Locations.l_14_13); state.p_Connected.set(E_Locations.l_4_13, E_Locations.l_4_12); state.p_Connected.set(E_Locations.l_2_3, E_Locations.l_2_2); state.p_Connected.set(E_Locations.l_6_6, E_Locations.l_5_6); state.p_Connected.set(E_Locations.l_12_3, E_Locations.l_12_2); state.p_Connected.set(E_Locations.l_4_16, E_Locations.l_3_16); state.p_Connected.set(E_Locations.l_8_10, E_Locations.l_7_10); state.p_Connected.set(E_Locations.l_10_3, E_Locations.l_10_2); state.p_Connected.set(E_Locations.l_13_8, E_Locations.l_12_8); state.p_Connected.set(E_Locations.l_3_1, E_Locations.l_3_2); state.p_Connected.set(E_Locations.l_5_16, E_Locations.l_4_16); state.p_Connected.set(E_Locations.l_10_1, E_Locations.l_11_1); state.p_Connected.set(E_Locations.l_13_8, E_Locations.l_14_8); state.p_Connected.set(E_Locations.l_6_6, E_Locations.l_6_5); state.p_Connected.set(E_Locations.l_12_11, E_Locations.l_12_10); state.p_Connected.set(E_Locations.l_3_14, E_Locations.l_3_15); state.p_Connected.set(E_Locations.l_7_10, E_Locations.l_7_11); state.p_Connected.set(E_Locations.l_3_7, E_Locations.l_3_6); state.p_Connected.set(E_Locations.l_14_7, E_Locations.l_13_7); state.p_Connected.set(E_Locations.l_13_6, E_Locations.l_14_6); state.p_Connected.set(E_Locations.l_12_5, E_Locations.l_13_5); state.p_Connected.set(E_Locations.l_9_11, E_Locations.l_8_11); state.p_Connected.set(E_Locations.l_12_1, E_Locations.l_13_1); state.p_Connected.set(E_Locations.l_14_5, E_Locations.l_15_5); state.p_Connected.set(E_Locations.l_13_5, E_Locations.l_13_4); state.p_Connected.set(E_Locations.l_10_5, E_Locations.l_9_5); state.p_Connected.set(E_Locations.l_10_14, E_Locations.l_10_13); state.p_Connected.set(E_Locations.l_13_13, E_Locations.l_12_13); state.p_Connected.set(E_Locations.l_8_13, E_Locations.l_8_12); state.p_Connected.set(E_Locations.l_6_11, 
E_Locations.l_6_10); state.p_Connected.set(E_Locations.l_2_16, E_Locations.l_3_16); state.p_Connected.set(E_Locations.l_11_15, E_Locations.l_11_14); state.p_Connected.set(E_Locations.l_2_13, E_Locations.l_2_12); state.p_Connected.set(E_Locations.l_4_10, E_Locations.l_4_11); state.p_Connected.set(E_Locations.l_16_10, E_Locations.l_16_11); state.p_Connected.set(E_Locations.l_15_13, E_Locations.l_15_12); state.p_Connected.set(E_Locations.l_6_3, E_Locations.l_6_4); state.p_Connected.set(E_Locations.l_3_8, E_Locations.l_2_8); state.p_Connected.set(E_Locations.l_16_5, E_Locations.l_16_4); state.p_Connected.set(E_Locations.l_16_1, E_Locations.l_15_1); state.p_Connected.set(E_Locations.l_14_9, E_Locations.l_13_9); state.p_Connected.set(E_Locations.l_15_5, E_Locations.l_15_4); state.p_Connected.set(E_Locations.l_6_14, E_Locations.l_5_14); state.p_Connected.set(E_Locations.l_16_4, E_Locations.l_16_3); state.p_Connected.set(E_Locations.l_1_16, E_Locations.l_1_15); state.p_Connected.set(E_Locations.l_4_9, E_Locations.l_4_8); state.p_Connected.set(E_Locations.l_5_11, E_Locations.l_4_11); state.p_Connected.set(E_Locations.l_10_16, E_Locations.l_10_15); state.p_Connected.set(E_Locations.l_3_2, E_Locations.l_4_2); state.p_Connected.set(E_Locations.l_5_15, E_Locations.l_6_15); state.p_Connected.set(E_Locations.l_1_3, E_Locations.l_1_4); state.p_Connected.set(E_Locations.l_9_4, E_Locations.l_9_5); state.p_Connected.set(E_Locations.l_15_3, E_Locations.l_15_4); state.p_Connected.set(E_Locations.l_9_14, E_Locations.l_8_14); state.p_Connected.set(E_Locations.l_11_5, E_Locations.l_12_5); state.p_Connected.set(E_Locations.l_4_16, E_Locations.l_5_16); state.p_Connected.set(E_Locations.l_5_13, E_Locations.l_4_13); state.p_Connected.set(E_Locations.l_12_8, E_Locations.l_12_7); state.p_Connected.set(E_Locations.l_8_14, E_Locations.l_8_13); state.p_Connected.set(E_Locations.l_9_8, E_Locations.l_10_8); state.p_Connected.set(E_Locations.l_2_13, E_Locations.l_3_13); 
state.p_Connected.set(E_Locations.l_7_4, E_Locations.l_7_5); state.p_Connected.set(E_Locations.l_12_13, E_Locations.l_11_13); state.p_Connected.set(E_Locations.l_4_6, E_Locations.l_4_7); state.p_Connected.set(E_Locations.l_9_14, E_Locations.l_9_15); state.p_Connected.set(E_Locations.l_6_13, E_Locations.l_6_14); state.p_Connected.set(E_Locations.l_4_7, E_Locations.l_4_6); state.p_Connected.set(E_Locations.l_9_10, E_Locations.l_10_10); state.p_Connected.set(E_Locations.l_12_14, E_Locations.l_11_14); state.p_Connected.set(E_Locations.l_12_3, E_Locations.l_12_4); state.p_Connected.set(E_Locations.l_15_12, E_Locations.l_14_12); state.p_Connected.set(E_Locations.l_5_2, E_Locations.l_5_1); state.p_Connected.set(E_Locations.l_9_2, E_Locations.l_10_2); state.p_Connected.set(E_Locations.l_14_11, E_Locations.l_13_11); state.p_Connected.set(E_Locations.l_5_10, E_Locations.l_6_10); state.p_Connected.set(E_Locations.l_13_3, E_Locations.l_14_3); state.p_Connected.set(E_Locations.l_16_4, E_Locations.l_15_4); state.p_Connected.set(E_Locations.l_7_11, E_Locations.l_7_10); state.p_Connected.set(E_Locations.l_14_11, E_Locations.l_15_11); state.p_Connected.set(E_Locations.l_8_14, E_Locations.l_7_14); state.p_Connected.set(E_Locations.l_2_6, E_Locations.l_2_7); state.p_Connected.set(E_Locations.l_15_3, E_Locations.l_15_2); state.p_Connected.set(E_Locations.l_1_8, E_Locations.l_1_7); state.p_Connected.set(E_Locations.l_5_9, E_Locations.l_5_10); state.p_Connected.set(E_Locations.l_2_10, E_Locations.l_2_11); state.p_Connected.set(E_Locations.l_1_9, E_Locations.l_1_8); state.p_Connected.set(E_Locations.l_16_15, E_Locations.l_16_16); state.p_Connected.set(E_Locations.l_9_13, E_Locations.l_10_13); state.p_Connected.set(E_Locations.l_4_11, E_Locations.l_3_11); state.p_Connected.set(E_Locations.l_7_1, E_Locations.l_6_1); state.p_Connected.set(E_Locations.l_8_16, E_Locations.l_7_16); state.p_Connected.set(E_Locations.l_7_12, E_Locations.l_7_11); state.p_Connected.set(E_Locations.l_4_5, 
E_Locations.l_3_5); state.p_Connected.set(E_Locations.l_7_15, E_Locations.l_7_14); state.p_Connected.set(E_Locations.l_9_4, E_Locations.l_8_4); state.p_Connected.set(E_Locations.l_3_6, E_Locations.l_3_7); state.p_Connected.set(E_Locations.l_8_8, E_Locations.l_7_8); state.p_Connected.set(E_Locations.l_10_6, E_Locations.l_10_7); state.p_Connected.set(E_Locations.l_10_15, E_Locations.l_11_15); state.p_Connected.set(E_Locations.l_4_9, E_Locations.l_3_9); state.p_Connected.set(E_Locations.l_13_6, E_Locations.l_12_6); state.p_Connected.set(E_Locations.l_16_16, E_Locations.l_16_15); state.p_Connected.set(E_Locations.l_12_11, E_Locations.l_12_12); state.p_Connected.set(E_Locations.l_2_15, E_Locations.l_2_16); state.p_Connected.set(E_Locations.l_14_11, E_Locations.l_14_12); state.p_Connected.set(E_Locations.l_7_1, E_Locations.l_8_1); state.p_Connected.set(E_Locations.l_12_1, E_Locations.l_12_2); state.p_Connected.set(E_Locations.l_14_5, E_Locations.l_13_5); state.p_Connected.set(E_Locations.l_1_14, E_Locations.l_1_15); state.p_Connected.set(E_Locations.l_3_14, E_Locations.l_2_14); state.p_Connected.set(E_Locations.l_6_13, E_Locations.l_6_12); state.p_Connected.set(E_Locations.l_6_9, E_Locations.l_6_8); state.p_Connected.set(E_Locations.l_12_10, E_Locations.l_12_9); state.p_Connected.set(E_Locations.l_16_8, E_Locations.l_15_8); state.p_Connected.set(E_Locations.l_8_7, E_Locations.l_8_6); state.p_Connected.set(E_Locations.l_15_6, E_Locations.l_15_7); state.p_Connected.set(E_Locations.l_9_15, E_Locations.l_10_15); state.p_Connected.set(E_Locations.l_8_3, E_Locations.l_8_4); state.p_Connected.set(E_Locations.l_3_12, E_Locations.l_3_13); state.p_Connected.set(E_Locations.l_11_14, E_Locations.l_12_14); state.p_Connected.set(E_Locations.l_8_13, E_Locations.l_9_13); state.p_Connected.set(E_Locations.l_6_7, E_Locations.l_5_7); state.p_Connected.set(E_Locations.l_9_7, E_Locations.l_9_8); state.p_Connected.set(E_Locations.l_12_14, E_Locations.l_12_15); 
state.p_Connected.set(E_Locations.l_13_5, E_Locations.l_13_6); state.p_Connected.set(E_Locations.l_9_1, E_Locations.l_8_1); state.p_Connected.set(E_Locations.l_7_8, E_Locations.l_8_8); state.p_Connected.set(E_Locations.l_2_10, E_Locations.l_1_10); state.p_Connected.set(E_Locations.l_4_12, E_Locations.l_3_12); state.p_Connected.set(E_Locations.l_16_7, E_Locations.l_16_6); state.p_Connected.set(E_Locations.l_15_9, E_Locations.l_15_8); state.p_Connected.set(E_Locations.l_3_4, E_Locations.l_3_3); state.p_Connected.set(E_Locations.l_1_7, E_Locations.l_1_6); state.p_Connected.set(E_Locations.l_3_10, E_Locations.l_3_9); state.p_Connected.set(E_Locations.l_14_6, E_Locations.l_14_7); state.p_Connected.set(E_Locations.l_3_15, E_Locations.l_3_14); state.p_Connected.set(E_Locations.l_15_1, E_Locations.l_14_1); state.p_Connected.set(E_Locations.l_10_7, E_Locations.l_10_6); state.p_Connected.set(E_Locations.l_8_1, E_Locations.l_7_1); state.p_Connected.set(E_Locations.l_14_8, E_Locations.l_14_7); state.p_Connected.set(E_Locations.l_16_14, E_Locations.l_16_15); state.p_Connected.set(E_Locations.l_10_15, E_Locations.l_10_14); state.p_Connected.set(E_Locations.l_7_9, E_Locations.l_8_9); state.p_Connected.set(E_Locations.l_12_5, E_Locations.l_12_6); state.p_Connected.set(E_Locations.l_5_11, E_Locations.l_6_11); state.p_Connected.set(E_Locations.l_1_13, E_Locations.l_1_14); state.p_Connected.set(E_Locations.l_7_7, E_Locations.l_7_6); state.p_Connected.set(E_Locations.l_8_5, E_Locations.l_8_6); state.p_Connected.set(E_Locations.l_12_4, E_Locations.l_12_3); state.p_Connected.set(E_Locations.l_16_12, E_Locations.l_15_12); state.p_Connected.set(E_Locations.l_11_12, E_Locations.l_11_11); state.p_Connected.set(E_Locations.l_12_15, E_Locations.l_13_15); state.p_Connected.set(E_Locations.l_10_2, E_Locations.l_10_1); state.p_Connected.set(E_Locations.l_9_9, E_Locations.l_8_9); state.p_Connected.set(E_Locations.l_11_9, E_Locations.l_11_10); state.p_Connected.set(E_Locations.l_15_13, 
E_Locations.l_14_13); state.p_Connected.set(E_Locations.l_3_8, E_Locations.l_4_8); state.p_Connected.set(E_Locations.l_14_4, E_Locations.l_15_4); state.p_Connected.set(E_Locations.l_12_12, E_Locations.l_13_12); state.p_Connected.set(E_Locations.l_13_12, E_Locations.l_13_13); state.p_Connected.set(E_Locations.l_15_5, E_Locations.l_14_5); state.p_Connected.set(E_Locations.l_6_4, E_Locations.l_6_3); state.p_Connected.set(E_Locations.l_14_15, E_Locations.l_13_15); state.p_Connected.set(E_Locations.l_5_3, E_Locations.l_6_3); state.p_Connected.set(E_Locations.l_11_3, E_Locations.l_10_3); state.p_Connected.set(E_Locations.l_14_7, E_Locations.l_15_7); state.p_Connected.set(E_Locations.l_14_15, E_Locations.l_14_16); state.p_Connected.set(E_Locations.l_2_8, E_Locations.l_2_7); state.p_Connected.set(E_Locations.l_5_7, E_Locations.l_5_6); state.p_Connected.set(E_Locations.l_5_13, E_Locations.l_5_12); state.p_Connected.set(E_Locations.l_8_11, E_Locations.l_7_11); state.p_Connected.set(E_Locations.l_14_16, E_Locations.l_14_15); state.p_Connected.set(E_Locations.l_15_16, E_Locations.l_14_16); state.p_Connected.set(E_Locations.l_5_15, E_Locations.l_5_16); state.p_Connected.set(E_Locations.l_11_4, E_Locations.l_10_4); state.p_Connected.set(E_Locations.l_9_12, E_Locations.l_9_13); state.p_Connected.set(E_Locations.l_2_5, E_Locations.l_2_4); state.p_Connected.set(E_Locations.l_12_7, E_Locations.l_12_8); state.p_Connected.set(E_Locations.l_3_15, E_Locations.l_3_16); state.p_Connected.set(E_Locations.l_12_9, E_Locations.l_11_9); state.p_Connected.set(E_Locations.l_9_5, E_Locations.l_10_5); state.p_Connected.set(E_Locations.l_8_1, E_Locations.l_9_1); state.p_Connected.set(E_Locations.l_13_3, E_Locations.l_13_2); state.p_Connected.set(E_Locations.l_10_13, E_Locations.l_10_12); state.p_Connected.set(E_Locations.l_14_3, E_Locations.l_15_3); state.p_Connected.set(E_Locations.l_13_11, E_Locations.l_13_12); state.p_Connected.set(E_Locations.l_12_13, E_Locations.l_12_12); 
state.p_Connected.set(E_Locations.l_3_7, E_Locations.l_3_8); state.p_Connected.set(E_Locations.l_5_1, E_Locations.l_6_1); state.p_Connected.set(E_Locations.l_11_3, E_Locations.l_12_3); state.p_Connected.set(E_Locations.l_8_8, E_Locations.l_8_7); state.p_Connected.set(E_Locations.l_4_2, E_Locations.l_5_2); state.p_Connected.set(E_Locations.l_4_6, E_Locations.l_5_6); state.p_Connected.set(E_Locations.l_8_3, E_Locations.l_9_3); state.p_Connected.set(E_Locations.l_14_4, E_Locations.l_13_4); state.p_Connected.set(E_Locations.l_9_12, E_Locations.l_8_12); state.p_Connected.set(E_Locations.l_13_16, E_Locations.l_13_15); state.p_Connected.set(E_Locations.l_8_10, E_Locations.l_9_10); state.p_Connected.set(E_Locations.l_9_6, E_Locations.l_9_7); state.p_Connected.set(E_Locations.l_12_14, E_Locations.l_12_13); state.p_Connected.set(E_Locations.l_1_11, E_Locations.l_2_11); state.p_Connected.set(E_Locations.l_12_2, E_Locations.l_12_1); state.p_Connected.set(E_Locations.l_14_8, E_Locations.l_15_8); state.p_Connected.set(E_Locations.l_16_4, E_Locations.l_16_5); state.p_Connected.set(E_Locations.l_15_9, E_Locations.l_15_10); state.p_Connected.set(E_Locations.l_4_13, E_Locations.l_3_13); state.p_Connected.set(E_Locations.l_14_13, E_Locations.l_15_13); state.p_Connected.set(E_Locations.l_11_15, E_Locations.l_12_15); state.p_Connected.set(E_Locations.l_5_4, E_Locations.l_5_3); state.p_Connected.set(E_Locations.l_16_9, E_Locations.l_15_9); state.p_Connected.set(E_Locations.l_6_10, E_Locations.l_7_10); state.p_Connected.set(E_Locations.l_6_4, E_Locations.l_7_4); state.p_Connected.set(E_Locations.l_11_11, E_Locations.l_11_12); state.p_Connected.set(E_Locations.l_4_5, E_Locations.l_5_5); state.p_Connected.set(E_Locations.l_11_13, E_Locations.l_10_13); state.p_Connected.set(E_Locations.l_11_10, E_Locations.l_10_10); state.p_Connected.set(E_Locations.l_15_7, E_Locations.l_15_8); state.p_Connected.set(E_Locations.l_15_14, E_Locations.l_16_14); state.p_Connected.set(E_Locations.l_13_13, 
E_Locations.l_13_12); state.p_Connected.set(E_Locations.l_14_15, E_Locations.l_14_14); state.p_Connected.set(E_Locations.l_5_13, E_Locations.l_6_13); state.p_Connected.set(E_Locations.l_12_13, E_Locations.l_12_14); state.p_Connected.set(E_Locations.l_1_6, E_Locations.l_2_6); state.p_Connected.set(E_Locations.l_2_15, E_Locations.l_2_14); state.p_Connected.set(E_Locations.l_13_11, E_Locations.l_14_11); state.p_Connected.set(E_Locations.l_13_8, E_Locations.l_13_9); state.p_Connected.set(E_Locations.l_10_3, E_Locations.l_9_3); state.p_Connected.set(E_Locations.l_4_11, E_Locations.l_5_11); state.p_Connected.set(E_Locations.l_12_4, E_Locations.l_12_5); state.p_Connected.set(E_Locations.l_4_8, E_Locations.l_3_8); state.p_Connected.set(E_Locations.l_7_16, E_Locations.l_8_16); state.p_Connected.set(E_Locations.l_6_3, E_Locations.l_6_2); state.p_Connected.set(E_Locations.l_9_14, E_Locations.l_10_14); state.p_Connected.set(E_Locations.l_8_13, E_Locations.l_8_14); state.p_Connected.set(E_Locations.l_1_5, E_Locations.l_1_4); state.p_Connected.set(E_Locations.l_7_14, E_Locations.l_6_14); state.p_Connected.set(E_Locations.l_5_5, E_Locations.l_5_4); state.p_Connected.set(E_Locations.l_9_16, E_Locations.l_8_16); state.p_Connected.set(E_Locations.l_16_12, E_Locations.l_16_11); state.p_Connected.set(E_Locations.l_12_15, E_Locations.l_12_16); state.p_Connected.set(E_Locations.l_10_8, E_Locations.l_11_8); state.p_Connected.set(E_Locations.l_15_8, E_Locations.l_15_9); state.p_Connected.set(E_Locations.l_4_5, E_Locations.l_4_4); state.p_Connected.set(E_Locations.l_11_8, E_Locations.l_11_7); state.p_Connected.set(E_Locations.l_4_10, E_Locations.l_3_10); state.p_Connected.set(E_Locations.l_8_12, E_Locations.l_7_12); state.p_Connected.set(E_Locations.l_3_5, E_Locations.l_4_5); state.p_Connected.set(E_Locations.l_8_5, E_Locations.l_8_4); state.p_Connected.set(E_Locations.l_2_7, E_Locations.l_3_7); state.p_Connected.set(E_Locations.l_3_7, E_Locations.l_2_7); 
state.p_Connected.set(E_Locations.l_13_9, E_Locations.l_13_10); state.p_Entry.set(E_Ships.s, E_Locations.l_9_16); state.p_Entry.set(E_Ships.s2, E_Locations.l_1_9); state.p_Exit.set(E_Ships.s2, E_Locations.l_16_9); state.p_Exit.set(E_Ships.s, E_Locations.l_9_1); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_9, E_Locations.l_9_8); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_7, E_Locations.l_9_6); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_3_9, E_Locations.l_4_9); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_10, E_Locations.l_9_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_15_9, E_Locations.l_16_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_1_9, E_Locations.l_2_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_6_9, E_Locations.l_7_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_9_9, E_Locations.l_10_9); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_13, E_Locations.l_9_12); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_6, E_Locations.l_9_5); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_8_9, E_Locations.l_9_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_11_9, E_Locations.l_12_9); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_11, E_Locations.l_9_10); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_5_9, E_Locations.l_6_9); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_3, E_Locations.l_9_2); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_8, E_Locations.l_9_7); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_2, E_Locations.l_9_1); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_14_9, E_Locations.l_15_9); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_15, E_Locations.l_9_14); state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_14, E_Locations.l_9_13); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_4_9, E_Locations.l_5_9); state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_2_9, E_Locations.l_3_9); state.p_ConnectedShip.set(E_Ships.s2, 
E_Locations.l_13_9, E_Locations.l_14_9);
        // Tail of the generated problem-initialization method (its header is above
        // this chunk). Each call registers one ship-specific location pair in the
        // p_ConnectedShip predicate: (ship, location, location).
        state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_12_9, E_Locations.l_13_9);
        state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_7_9, E_Locations.l_8_9);
        state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_16, E_Locations.l_9_15);
        state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_12, E_Locations.l_9_11);
        state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_4, E_Locations.l_9_3);
        state.p_ConnectedShip.set(E_Ships.s, E_Locations.l_9_5, E_Locations.l_9_4);
        state.p_ConnectedShip.set(E_Ships.s2, E_Locations.l_10_9, E_Locations.l_11_9);
    }

    /**
     * Returns the fixed display name of this planning problem.
     *
     * @return the literal problem name {@code "AUV-Problem"}
     */
    @Override
    public String getName() {
        return "AUV-Problem";
    }

    /**
     * Returns the domain definition this problem instance belongs to.
     *
     * @return the {@code domain} field initialized elsewhere in this class
     */
    @Override
    public Domain getDomain() {
        return domain;
    }

    /**
     * Returns the initial state built by the generated initialization code above
     * (the p_Connected / p_Entry / p_Exit / p_ConnectedShip assignments).
     *
     * @return the {@code state} field holding the fully populated initial state
     */
    @Override
    public State getState() {
        return state;
    }

    /**
     * Returns the goal condition of this problem.
     *
     * @return the {@code goal} field initialized elsewhere in this class
     */
    @Override
    public Goal getGoal() {
        return goal;
    }

    /**
     * Returns the dead-end description for this problem.
     *
     * @return the {@code deadEnd} field initialized elsewhere in this class
     */
    @Override
    public PDDLDeadEnd getDeadEnd() {
        return deadEnd;
    }
}