hexsha
stringlengths
40
40
size
int64
3
1.05M
ext
stringclasses
1 value
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
5
1.02k
max_stars_repo_name
stringlengths
4
126
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
list
max_stars_count
float64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
5
1.02k
max_issues_repo_name
stringlengths
4
114
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
list
max_issues_count
float64
1
92.2k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
5
1.02k
max_forks_repo_name
stringlengths
4
136
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
list
max_forks_count
float64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
avg_line_length
float64
2.55
99.9
max_line_length
int64
3
1k
alphanum_fraction
float64
0.25
1
index
int64
0
1M
content
stringlengths
3
1.05M
3e119cf4e3830a939a3bb44fb8e6e2faac88b27b
747
java
Java
src/main/java/com/prueba/zabud/alcancia/dominio/servicio/alcancia/ServicioSumarAlcancia.java
pedrolop90/prueba-zabud-alcancia
c8db780248715a14f23131d1b089cd95ce1536bf
[ "MIT" ]
4
2021-05-03T06:04:10.000Z
2021-07-11T15:11:46.000Z
src/main/java/com/prueba/zabud/alcancia/dominio/servicio/alcancia/ServicioSumarAlcancia.java
pedrolop90/prueba-zabud-alcancia
c8db780248715a14f23131d1b089cd95ce1536bf
[ "MIT" ]
null
null
null
src/main/java/com/prueba/zabud/alcancia/dominio/servicio/alcancia/ServicioSumarAlcancia.java
pedrolop90/prueba-zabud-alcancia
c8db780248715a14f23131d1b089cd95ce1536bf
[ "MIT" ]
1
2021-05-30T20:30:52.000Z
2021-05-30T20:30:52.000Z
25.758621
82
0.796519
7,433
package com.prueba.zabud.alcancia.dominio.servicio.alcancia; import com.prueba.zabud.alcancia.dominio.Alcancia; import com.prueba.zabud.alcancia.dominio.repositorio.alcancia.RepositorioAlcancia; /** * @author pedro */ public class ServicioSumarAlcancia { private final RepositorioAlcancia repositorioAlcancia; public ServicioSumarAlcancia(RepositorioAlcancia repositorioAlcancia) { this.repositorioAlcancia = repositorioAlcancia; } public void ejecutar(Alcancia alcancia) { repositorioAlcancia .obtenerAlcanciaPorTipoMoneda(alcancia.obtenerTipoMoneda()) .ifPresent( alcanciaVieja -> alcancia.agregarMonedas(alcanciaVieja.obtenerCantidadMonedas()) ); repositorioAlcancia.guardarAlcancia(alcancia); } }
3e119d462b93633bb08be8e4d9873f479e49b574
1,138
java
Java
wallet/src/de/schildbach/wallet/data/WalletLiveData.java
shadowmoon-waltz/bitcoin-wallet
f3e26a214179bb525331ef82b0be6f5f23677f06
[ "Apache-2.0" ]
1
2020-10-21T12:48:08.000Z
2020-10-21T12:48:08.000Z
src/test/testdata/bitcoin-wallet/data/WalletLiveData.java
TobiasOnGitHub/FAXE-Feature_Annotation_eXtraction_Engine
93b94304a3441e2d9fdb2425aacd123da53b4120
[ "Apache-2.0" ]
null
null
null
src/test/testdata/bitcoin-wallet/data/WalletLiveData.java
TobiasOnGitHub/FAXE-Feature_Annotation_eXtraction_Engine
93b94304a3441e2d9fdb2425aacd123da53b4120
[ "Apache-2.0" ]
null
null
null
31.611111
73
0.739016
7,434
/* * Copyright the original author or authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package de.schildbach.wallet.data; import de.schildbach.wallet.WalletApplication; import org.bitcoinj.wallet.Wallet; /** * @author Andreas Schildbach */ public class WalletLiveData extends AbstractWalletLiveData<Wallet> { public WalletLiveData(final WalletApplication application) { super(application, 0); } @Override protected void onWalletActive(final Wallet wallet) { postValue(wallet); } }
3e119e087f1a74ff8528f14b6704ea136df24c1f
1,551
java
Java
ace-modules/bitcola-launchpad/src/main/java/com/bitcola/exchange/launchpad/feign/IDataServiceFeign.java
wxpkerpk/exchange-base-examlple
acb61eb9d8316c5cf290481362560203eaf682a7
[ "Apache-2.0" ]
1
2021-01-05T05:34:21.000Z
2021-01-05T05:34:21.000Z
ace-modules/bitcola-launchpad/src/main/java/com/bitcola/exchange/launchpad/feign/IDataServiceFeign.java
wxpkerpk/exchange-base-examlple
acb61eb9d8316c5cf290481362560203eaf682a7
[ "Apache-2.0" ]
null
null
null
ace-modules/bitcola-launchpad/src/main/java/com/bitcola/exchange/launchpad/feign/IDataServiceFeign.java
wxpkerpk/exchange-base-examlple
acb61eb9d8316c5cf290481362560203eaf682a7
[ "Apache-2.0" ]
3
2019-12-13T07:12:46.000Z
2022-01-13T05:04:38.000Z
36.069767
114
0.776918
7,435
package com.bitcola.exchange.launchpad.feign; import com.alicp.jetcache.anno.CacheType; import com.alicp.jetcache.anno.Cached; import com.bitcola.me.entity.ColaCoinSymbol; import com.bitcola.me.entity.ColaUserChooseVo; import com.bitcola.me.entity.ColaUserEntity; import com.bitcola.me.entity.ColaUserLimit; import org.springframework.cloud.openfeign.FeignClient; import org.springframework.stereotype.Service; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import java.util.List; /** * @author zkq * @create 2018-11-08 11:53 **/ @FeignClient(value = "dataservice") @Service public interface IDataServiceFeign { @RequestMapping(value = "user/info",method = RequestMethod.GET) public ColaUserEntity info(@RequestParam("userId") String userId); @RequestMapping(value = "userLimit/getUserLimit",method = RequestMethod.GET) public ColaUserLimit getUserLimit(@RequestParam("userId")String userId, @RequestParam("module")String module); @RequestMapping("symbol/getPair") @Cached(expire = 120,cacheType = CacheType.LOCAL) ColaCoinSymbol getSymbol(@RequestParam(value = "pair")String pair); @RequestMapping(value = "symbol/list", method = RequestMethod.GET) List<ColaUserChooseVo> getUserFavSymbol(@RequestParam(value = "userId") String userId); @RequestMapping("symbol/getSymbols") @Cached(expire = 5,cacheType = CacheType.LOCAL) List<String> getSymbols(); }
3e119ec6164aa68965ea24e20a881d33ace37a79
35,108
java
Java
orbmain/src/main/java/com/sun/corba/ee/impl/protocol/giopmsgheaders/MessageBase.java
okummer/orb
a20dedd913ddd9199c7aa90a2154e02d8bce5a04
[ "BSD-3-Clause" ]
null
null
null
orbmain/src/main/java/com/sun/corba/ee/impl/protocol/giopmsgheaders/MessageBase.java
okummer/orb
a20dedd913ddd9199c7aa90a2154e02d8bce5a04
[ "BSD-3-Clause" ]
null
null
null
orbmain/src/main/java/com/sun/corba/ee/impl/protocol/giopmsgheaders/MessageBase.java
okummer/orb
a20dedd913ddd9199c7aa90a2154e02d8bce5a04
[ "BSD-3-Clause" ]
null
null
null
41.158265
118
0.547938
7,436
/* * Copyright (c) 1997, 2018 Oracle and/or its affiliates. All rights reserved. * * This program and the accompanying materials are made available under the * terms of the Eclipse Distribution License v. 1.0, which is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * SPDX-License-Identifier: BSD-3-Clause */ package com.sun.corba.ee.impl.protocol.giopmsgheaders; import java.lang.reflect.Constructor; import java.nio.ByteBuffer; import java.util.Iterator; import org.omg.CORBA.COMM_FAILURE; import org.omg.CORBA.CompletionStatus; import org.omg.CORBA.SystemException; import org.omg.IOP.TaggedProfile; import com.sun.corba.ee.spi.ior.ObjectKey; import com.sun.corba.ee.spi.ior.IOR; import com.sun.corba.ee.spi.ior.iiop.IIOPProfile; import com.sun.corba.ee.spi.ior.iiop.IIOPFactories; import com.sun.corba.ee.spi.ior.iiop.IIOPProfileTemplate; import com.sun.corba.ee.spi.ior.iiop.GIOPVersion; import com.sun.corba.ee.spi.ior.iiop.RequestPartitioningComponent; import com.sun.corba.ee.spi.orb.ORB; import com.sun.corba.ee.spi.orb.ObjectKeyCacheEntry; import com.sun.corba.ee.spi.misc.ORBClassLoader; import com.sun.corba.ee.spi.protocol.RequestId; import com.sun.corba.ee.spi.servicecontext.ServiceContexts; import com.sun.corba.ee.spi.transport.Connection; import com.sun.corba.ee.spi.transport.TransportManager; import com.sun.corba.ee.spi.logging.ORBUtilSystemException; import com.sun.corba.ee.impl.orb.ObjectKeyCacheEntryNoObjectAdapterImpl; import com.sun.corba.ee.impl.misc.ORBUtility; import com.sun.corba.ee.spi.misc.ORBConstants; import com.sun.corba.ee.impl.protocol.AddressingDispositionException; import com.sun.corba.ee.impl.protocol.RequestIdImpl; import com.sun.corba.ee.impl.transport.MessageTraceManagerImpl; import com.sun.corba.ee.spi.trace.Giop; /** * This class acts as the base class for the various GIOP message types. This * also serves as a factory to create various message types. We currently * support GIOP 1.0, 1.1 and 1.2 message types. 
* * @author Ram Jeyaraman 05/14/2000 * @version 1.0 */ @Giop public abstract class MessageBase implements Message { private int threadPoolToUse; // (encodingVersion == 0x00) implies CDR encoding, // (encodingVersion > 0x00) implies Java serialization encoding version. private byte encodingVersion = ORBConstants.CDR_ENC_VERSION; private static final ORBUtilSystemException wrapper = ORBUtilSystemException.self; // Static methods public static String typeToString(byte type) { String result = type + "/"; switch (type) { case GIOPRequest: result += "GIOPRequest"; break; case GIOPReply: result += "GIOPReply"; break; case GIOPCancelRequest: result += "GIOPCancelRequest"; break; case GIOPLocateRequest: result += "GIOPLocateRequest"; break; case GIOPLocateReply: result += "GIOPLocateReply"; break; case GIOPCloseConnection: result += "GIOPCloseConnection"; break; case GIOPMessageError: result += "GIOPMessageError"; break; case GIOPFragment: result += "GIOPFragment"; break; default: result += "Unknown"; break; } return result; } public static MessageBase parseGiopHeader(ORB orb, Connection connection, ByteBuffer buf, int startPosition) { TransportManager ctm = orb.getTransportManager(); MessageTraceManagerImpl mtm = (MessageTraceManagerImpl) ctm.getMessageTraceManager(); if (mtm.isEnabled()) { mtm.recordHeaderReceived(buf); } // Sanity checks /* * check for magic corruption * check for version incompatibility * check if fragmentation is allowed based on mesg type. . 1.0 fragmentation disallowed; FragmentMessage is non-existent. . 1.1 only {Request, Reply} msgs maybe fragmented. . 1.2 only {Request, Reply, LocateRequest, LocateReply} msgs maybe fragmented. */ byte[] it = new byte[12]; buf.position(startPosition); buf.get(it); int b1, b2, b3, b4; b1 = (it[0] << 24) & 0xFF000000; b2 = (it[1] << 16) & 0x00FF0000; b3 = (it[2] << 8) & 0x0000FF00; b4 = (it[3]) & 0x000000FF; int magic = (b1 | b2 | b3 | b4); if (magic != GIOPBigMagic) { // If Magic is incorrect, it is an error. 
// ACTION : send MessageError and close the connection. throw wrapper.giopMagicError(); } // Extract the encoding version from the request GIOP Version, // if it contains an encoding, and set GIOP version appropriately. // For Java serialization, we use GIOP Version 1.2 message format. byte requestEncodingVersion = ORBConstants.CDR_ENC_VERSION; if ((it[4] == GIOPVersion.V13_XX.getMajor()) && (it[5] <= ORBConstants.JAVA_ENC_VERSION) && (it[5] > ORBConstants.CDR_ENC_VERSION)) { // Entering this block means the request is using Java encoding, // and the encoding version is <= this ORB's Java encoding version. requestEncodingVersion = it[5]; buf.put(startPosition + 4, GIOPVersion.V1_2.getMajor()); buf.put(startPosition + 5, GIOPVersion.V1_2.getMinor()); it[4] = GIOPVersion.V1_2.getMajor();//buf.get(4); it[5] = GIOPVersion.V1_2.getMinor();//buf.get(5); } GIOPVersion orbVersion = orb.getORBData().getGIOPVersion(); if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: Message GIOP version: " + it[4] + '.' + it[5]); dprint(".parseGIOPHeader: ORB Max GIOP Version: " + orbVersion); } if ((it[4] > orbVersion.getMajor()) || ((it[4] == orbVersion.getMajor()) && (it[5] > orbVersion.getMinor())) ) { // For requests, sending ORB should use the version info // published in the IOR or may choose to use a <= version // for requests. If the version is greater than published version, // it is an error. // For replies, the ORB should always receive a version it supports // or less, but never greater (except for MessageError) // ACTION : Send back a MessageError() with the the highest version // the server ORB supports, and close the connection. 
if (it[7] != GIOPMessageError) { throw wrapper.giopVersionError(); } } AreFragmentsAllowed(it[4], it[5], it[6], it[7]); // create appropriate messages types MessageBase msg; switch (it[7]) { case GIOPRequest: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating RequestMessage"); } //msg = new RequestMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new RequestMessage_1_0(orb); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new RequestMessage_1_1(orb); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new RequestMessage_1_2(orb); } else { throw wrapper.giopVersionError(); } break; case GIOPLocateRequest: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating LocateRequestMessage"); } //msg = new LocateRequestMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new LocateRequestMessage_1_0(orb); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new LocateRequestMessage_1_1(orb); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new LocateRequestMessage_1_2(orb); } else { throw wrapper.giopVersionError(); } break; case GIOPCancelRequest: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating CancelRequestMessage"); } //msg = new CancelRequestMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new CancelRequestMessage_1_0(); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new CancelRequestMessage_1_1(); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new CancelRequestMessage_1_2(); } else { throw wrapper.giopVersionError(); } break; case GIOPReply: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating ReplyMessage"); } //msg = new ReplyMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new ReplyMessage_1_0(orb); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new ReplyMessage_1_1(orb); } else if ((it[4] == 0x01) && (it[5] == 0x02)) 
{ // 1.2 msg = new ReplyMessage_1_2(orb); } else { throw wrapper.giopVersionError(); } break; case GIOPLocateReply: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating LocateReplyMessage"); } //msg = new LocateReplyMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new LocateReplyMessage_1_0(orb); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new LocateReplyMessage_1_1(orb); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new LocateReplyMessage_1_2(orb); } else { throw wrapper.giopVersionError(); } break; case GIOPCloseConnection: // IMPORTANT: Must process the CloseConnection message as soon as // its received to avoid a potential race condition on // the connection, (i.e. another thread could try to send // a new request on the same connection while this // CloseConnection message would be getting dispatched // if the CloseConnection message were not processed // here). if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: received CloseConnection message"); } COMM_FAILURE comm_failure = wrapper.connectionRebind(); connection.purgeCalls(comm_failure, false, true); throw comm_failure; case GIOPMessageError: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating Message for CloseConnection or MessageError"); } // REVISIT a MessageError may contain the highest version server // can support. In such a case, a new request may be made with the // correct version or the connection be simply closed. Note the // connection may have been closed by the server. 
//msg = new Message(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 msg = new Message_1_0(); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new Message_1_1(); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new Message_1_1(); } else { throw wrapper.giopVersionError(); } break; case GIOPFragment: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: creating FragmentMessage"); } //msg = new FragmentMessage(orb.giopDebugFlag); if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 // not possible (error checking done already) // Throw exception just for completeness, and // for proper dataflow analysis in FindBugs throw wrapper.giopVersionError(); } else if ((it[4] == 0x01) && (it[5] == 0x01)) { // 1.1 msg = new FragmentMessage_1_1(); } else if ((it[4] == 0x01) && (it[5] == 0x02)) { // 1.2 msg = new FragmentMessage_1_2(); } else { throw wrapper.giopVersionError(); } break; default: if (orb.giopDebugFlag) { dprint(".parseGIOPHeader: UNKNOWN MESSAGE TYPE: " + it[7]); } // unknown message type ? // ACTION : send MessageError and close the connection throw wrapper.giopVersionError(); } // // Initialize the generic GIOP header instance variables. // if ((it[4] == 0x01) && (it[5] == 0x00)) { // 1.0 Message_1_0 msg10 = (Message_1_0) msg; msg10.magic = magic; msg10.GIOP_version = new GIOPVersion(it[4], it[5]); msg10.byte_order = (it[6] == LITTLE_ENDIAN_BIT); // 'request partitioning' not supported on GIOP version 1.0 // so just use the default thread pool, 0. msg.threadPoolToUse = 0; msg10.message_type = it[7]; msg10.message_size = readSize(it[8], it[9], it[10], it[11], msg10.isLittleEndian()) + GIOPMessageHeaderLength; } else { // 1.1 & 1.2 Message_1_1 msg11 = (Message_1_1) msg; msg11.magic = magic; msg11.GIOP_version = new GIOPVersion(it[4], it[5]); msg11.flags = (byte) (it[6] & TRAILING_TWO_BIT_BYTE_MASK); // IMPORTANT: For 'request partitioning', the thread pool to use // information is stored in the leading 6 bits of byte 6. 
// // IMPORTANT: Request partitioning is a PROPRIETARY EXTENSION !!! // // NOTE: Bitwise operators will promote a byte to an int before // performing a bitwise operation and bytes, ints, longs, etc // are signed types in Java. Thus, the need for the // THREAD_POOL_TO_USE_MASK operation. msg.threadPoolToUse = (it[6] >>> 2) & THREAD_POOL_TO_USE_MASK; msg11.message_type = it[7]; msg11.message_size = readSize(it[8], it[9], it[10], it[11], msg11.isLittleEndian()) + GIOPMessageHeaderLength; if (orb.giopSizeDebugFlag) { StringBuilder sb = new StringBuilder(); sb.append(typeToString(msg11.message_type)); sb.append("("); sb.append(msg11.message_size); sb.append(" bytes)"); dprint(sb.toString()); } } msg.setEncodingVersion(requestEncodingVersion); return msg; } @SuppressWarnings("deprecation") private static RequestMessage createRequest( ORB orb, GIOPVersion gv, byte encodingVersion, int request_id, boolean response_expected, byte[] object_key, String operation, ServiceContexts service_contexts, org.omg.CORBA.Principal requesting_principal) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new RequestMessage_1_0(orb, service_contexts, request_id, response_expected, object_key, operation, requesting_principal); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new RequestMessage_1_1(orb, service_contexts, request_id, response_expected, new byte[]{0x00, 0x00, 0x00}, object_key, operation, requesting_principal); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 // Note: Currently we use response_expected flag to decide if the // call is oneway or not. Ideally, it is possible to expect a // response on a oneway call too, but we do not support it now. byte response_flags = 0x03; if (response_expected) { response_flags = 0x03; } else { response_flags = 0x00; } /* // REVISIT The following is the correct way to do it. This gives // more flexibility. 
if ((DII::INV_NO_RESPONSE == false) && response_expected) { response_flags = 0x03; // regular two-way } else if ((DII::INV_NO_RESPONSE == false) && !response_expected) { // this condition is not possible } else if ((DII::INV_NO_RESPONSE == true) && response_expected) { // oneway, but we need response for LocationForwards or // SystemExceptions. response_flags = 0x01; } else if ((DII::INV_NO_RESPONSE == true) && !response_expected) { // oneway, no response required response_flags = 0x00; } */ TargetAddress target = new TargetAddress(); target.object_key(object_key); RequestMessage msg = new RequestMessage_1_2(orb, request_id, response_flags, new byte[]{0x00, 0x00, 0x00}, target, operation, service_contexts); msg.setEncodingVersion(encodingVersion); return msg; } else { throw wrapper.giopVersionError(); } } @SuppressWarnings({"deprecation"}) public static RequestMessage createRequest( ORB orb, GIOPVersion gv, byte encodingVersion, int request_id, boolean response_expected, IOR ior, short addrDisp, String operation, ServiceContexts service_contexts, org.omg.CORBA.Principal requesting_principal) { RequestMessage requestMessage = null; IIOPProfile profile = ior.getProfile(); if (addrDisp == KeyAddr.value) { // object key will be used for target addressing profile = ior.getProfile(); ObjectKey objKey = profile.getObjectKey(); byte[] object_key = objKey.getBytes(orb); requestMessage = createRequest(orb, gv, encodingVersion, request_id, response_expected, object_key, operation, service_contexts, requesting_principal); } else { if (!(gv.equals(GIOPVersion.V1_2))) { // only object_key based target addressing is allowed for // GIOP 1.0 & 1.1 throw wrapper.giopVersionError(); } // Note: Currently we use response_expected flag to decide if the // call is oneway or not. Ideally, it is possible to expect a // response on a oneway call too, but we do not support it now. 
byte response_flags = 0x03; if (response_expected) { response_flags = 0x03; } else { response_flags = 0x00; } TargetAddress target = new TargetAddress(); if (addrDisp == ProfileAddr.value) { // iop profile will be used profile = ior.getProfile(); target.profile(profile.getIOPProfile()); } else if (addrDisp == ReferenceAddr.value) { // ior will be used IORAddressingInfo iorInfo = new IORAddressingInfo(0, // profile index ior.getIOPIOR()); target.ior(iorInfo); } else { // invalid target addressing disposition value throw wrapper.illegalTargetAddressDisposition(); } requestMessage = new RequestMessage_1_2(orb, request_id, response_flags, new byte[]{0x00, 0x00, 0x00}, target, operation, service_contexts); requestMessage.setEncodingVersion(encodingVersion); } if (gv.supportsIORIIOPProfileComponents()) { // add request partitioning thread pool to use info int poolToUse = 0; // default pool IIOPProfileTemplate temp = (IIOPProfileTemplate) profile.getTaggedProfileTemplate(); Iterator iter = temp.iteratorById(ORBConstants.TAG_REQUEST_PARTITIONING_ID); if (iter.hasNext()) { poolToUse = ((RequestPartitioningComponent) iter.next()).getRequestPartitioningId(); } if (poolToUse < ORBConstants.REQUEST_PARTITIONING_MIN_THREAD_POOL_ID || poolToUse > ORBConstants.REQUEST_PARTITIONING_MAX_THREAD_POOL_ID) { throw wrapper.invalidRequestPartitioningId(poolToUse, ORBConstants.REQUEST_PARTITIONING_MIN_THREAD_POOL_ID, ORBConstants.REQUEST_PARTITIONING_MAX_THREAD_POOL_ID); } requestMessage.setThreadPoolToUse(poolToUse); } return requestMessage; } public static ReplyMessage createReply( ORB orb, GIOPVersion gv, byte encodingVersion, int request_id, int reply_status, ServiceContexts service_contexts, IOR ior) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new ReplyMessage_1_0(orb, service_contexts, request_id, reply_status, ior); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new ReplyMessage_1_1(orb, service_contexts, request_id, reply_status, ior); } else if 
(gv.equals(GIOPVersion.V1_2)) { // 1.2 ReplyMessage msg = new ReplyMessage_1_2(orb, request_id, reply_status, service_contexts, ior); msg.setEncodingVersion(encodingVersion); return msg; } else { throw wrapper.giopVersionError(); } } public static LocateRequestMessage createLocateRequest( ORB orb, GIOPVersion gv, byte encodingVersion, int request_id, byte[] object_key) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new LocateRequestMessage_1_0(orb, request_id, object_key); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new LocateRequestMessage_1_1(orb, request_id, object_key); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 TargetAddress target = new TargetAddress(); target.object_key(object_key); LocateRequestMessage msg = new LocateRequestMessage_1_2(orb, request_id, target); msg.setEncodingVersion(encodingVersion); return msg; } else { throw wrapper.giopVersionError(); } } public static LocateReplyMessage createLocateReply( ORB orb, GIOPVersion gv, byte encodingVersion, int request_id, int locate_status, IOR ior) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new LocateReplyMessage_1_0(orb, request_id, locate_status, ior); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new LocateReplyMessage_1_1(orb, request_id, locate_status, ior); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 LocateReplyMessage msg = new LocateReplyMessage_1_2(orb, request_id, locate_status, ior); msg.setEncodingVersion(encodingVersion); return msg; } else { throw wrapper.giopVersionError(); } } public static CancelRequestMessage createCancelRequest( GIOPVersion gv, int request_id) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new CancelRequestMessage_1_0(request_id); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new CancelRequestMessage_1_1(request_id); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 return new CancelRequestMessage_1_2(request_id); } else { throw wrapper.giopVersionError(); } } public static Message 
createCloseConnection(GIOPVersion gv) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new Message_1_0(Message.GIOPBigMagic, false, Message.GIOPCloseConnection, 0); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new Message_1_1(Message.GIOPBigMagic, GIOPVersion.V1_1, FLAG_NO_FRAG_BIG_ENDIAN, Message.GIOPCloseConnection, 0); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 return new Message_1_1(Message.GIOPBigMagic, GIOPVersion.V1_2, FLAG_NO_FRAG_BIG_ENDIAN, Message.GIOPCloseConnection, 0); } else { throw wrapper.giopVersionError(); } } public static Message createMessageError(GIOPVersion gv) { if (gv.equals(GIOPVersion.V1_0)) { // 1.0 return new Message_1_0(Message.GIOPBigMagic, false, Message.GIOPMessageError, 0); } else if (gv.equals(GIOPVersion.V1_1)) { // 1.1 return new Message_1_1(Message.GIOPBigMagic, GIOPVersion.V1_1, FLAG_NO_FRAG_BIG_ENDIAN, Message.GIOPMessageError, 0); } else if (gv.equals(GIOPVersion.V1_2)) { // 1.2 return new Message_1_1(Message.GIOPBigMagic, GIOPVersion.V1_2, FLAG_NO_FRAG_BIG_ENDIAN, Message.GIOPMessageError, 0); } else { throw wrapper.giopVersionError(); } } /** * Set a flag in the given buffer (fragment bit, byte order bit, etc) */ public static void setFlag(ByteBuffer byteBuffer, int flag) { byte b = byteBuffer.get(6); b |= flag; byteBuffer.put(6, b); } private static void AreFragmentsAllowed(byte major, byte minor, byte flag, byte msgType) { if ((major == 0x01) && (minor == 0x00)) { // 1.0 if (msgType == GIOPFragment) { throw wrapper.fragmentationDisallowed(); } } if ((flag & MORE_FRAGMENTS_BIT) == MORE_FRAGMENTS_BIT) { switch (msgType) { case GIOPCancelRequest: case GIOPCloseConnection: case GIOPMessageError: throw wrapper.fragmentationDisallowed(); case GIOPLocateRequest: case GIOPLocateReply: if ((major == 0x01) && (minor == 0x01)) { // 1.1 throw wrapper.fragmentationDisallowed(); } break; } } } /** * Extract the object key from TargetAddress. * * @return ObjectKey the object key. 
*/ static ObjectKeyCacheEntry extractObjectKeyCacheEntry(TargetAddress target, ORB orb) { short orbTargetAddrPref = orb.getORBData().getGIOPTargetAddressPreference(); short reqAddrDisp = target.discriminator(); switch (orbTargetAddrPref) { case ORBConstants.ADDR_DISP_OBJKEY: if (reqAddrDisp != KeyAddr.value) { throw new AddressingDispositionException(KeyAddr.value); } break; case ORBConstants.ADDR_DISP_PROFILE: if (reqAddrDisp != ProfileAddr.value) { throw new AddressingDispositionException(ProfileAddr.value); } break; case ORBConstants.ADDR_DISP_IOR: if (reqAddrDisp != ReferenceAddr.value) { throw new AddressingDispositionException(ReferenceAddr.value); } break; case ORBConstants.ADDR_DISP_HANDLE_ALL: break; default: throw wrapper.orbTargetAddrPreferenceInExtractObjectkeyInvalid(); } try { switch (reqAddrDisp) { case KeyAddr.value: byte[] objKey = target.object_key(); if (objKey != null) { // AddressingDisposition::KeyAddr return orb.extractObjectKeyCacheEntry(objKey); } break; case ProfileAddr.value: TaggedProfile profile = target.profile(); if (profile != null) { // AddressingDisposition::ProfileAddr IIOPProfile iiopProfile = IIOPFactories.makeIIOPProfile(orb, profile); ObjectKey objectKey = iiopProfile.getObjectKey(); return new ObjectKeyCacheEntryNoObjectAdapterImpl(objectKey); } break; case ReferenceAddr.value: IORAddressingInfo iorInfo = target.ior(); if (iorInfo != null) { // AddressingDisposition::IORAddr profile = iorInfo.ior.profiles[iorInfo.selected_profile_index]; IIOPProfile iiopProfile = IIOPFactories.makeIIOPProfile(orb, profile); ObjectKey objectKey = iiopProfile.getObjectKey(); return new ObjectKeyCacheEntryNoObjectAdapterImpl(objectKey); } break; default: // this cannot happen // There is no need for a explicit exception, since the // TargetAddressHelper.read() would have raised a BAD_OPERATION // exception by now. 
break; } } catch (Exception e) { throw wrapper.invalidObjectKey(e); } // If we got here, something went wrong: the object key is null. throw wrapper.invalidObjectKey(); } private static int readSize(byte b1, byte b2, byte b3, byte b4, boolean littleEndian) { int a1, a2, a3, a4; if (!littleEndian) { a1 = (b1 << 24) & 0xFF000000; a2 = (b2 << 16) & 0x00FF0000; a3 = (b3 << 8) & 0x0000FF00; a4 = (b4) & 0x000000FF; } else { a1 = (b4 << 24) & 0xFF000000; a2 = (b3 << 16) & 0x00FF0000; a3 = (b2 << 8) & 0x0000FF00; a4 = (b1) & 0x000000FF; } return (a1 | a2 | a3 | a4); } static void nullCheck(Object obj) { if (obj == null) { throw wrapper.nullNotAllowed(); } } static SystemException getSystemException( String exClassName, int minorCode, CompletionStatus completionStatus, String message, ORBUtilSystemException wrapper) { SystemException sysEx; try { Class<?> clazz = ORBClassLoader.loadClass(exClassName); if (message == null) { sysEx = (SystemException) clazz.newInstance(); } else { Class<?>[] types = {String.class}; Constructor<?> constructor = clazz.getConstructor(types); Object[] args = {message}; sysEx = (SystemException) constructor.newInstance(args); } } catch (Exception someEx) { throw wrapper.badSystemExceptionInReply(someEx); } sysEx.minor = minorCode; sysEx.completed = completionStatus; return sysEx; } public void callback(MessageHandler handler) throws java.io.IOException { handler.handleInput(this); } public int getThreadPoolToUse() { return threadPoolToUse; } public byte getEncodingVersion() { return this.encodingVersion; } public void setEncodingVersion(byte version) { this.encodingVersion = version; } /** * Return a Message's CorbaRequestId. * NOTE: This method should be overridden for messages that support * a 4 byte request id following the 12 byte GIOP message header. */ public RequestId getCorbaRequestId() { return RequestIdImpl.UNKNOWN_CORBA_REQUEST_ID; } /** * Returns true if this message could be followed by a fragment. 
*/ public boolean supportsFragments() { return false; } /** * Get the request id from the 4 bytes following the 12 byte GIOP * request header if the request header exists. Otherwise, return 0. * <p> * NOTE: Assumes Message already been filtered by * MessageBase.messageSupportsFragments(Message) * </p> * @return <code>CorbaRequestId</code>if <code>Message</code> supports a 12 * + 4 byte GIOP header. Otherwise returns a CorbaRequestId with an * undefined request id. */ public static RequestId getRequestIdFromMessageBytes(Message message, ByteBuffer byteBuffer) { if (!(message instanceof Message_1_2)) { return RequestIdImpl.UNKNOWN_CORBA_REQUEST_ID; // in older protocols the request ID is not so easily found } else { Message_1_2 message_1_2 = (Message_1_2) message; message_1_2.unmarshalRequestID(byteBuffer); return new RequestIdImpl(message_1_2.request_id); } } private static void dprint(String msg) { ORBUtility.dprint("MessageBase", msg); } }
3e119edf7f3ab4b58fc1c432f668a6e814406664
5,658
java
Java
CompGuide-Web/CompGuide-Web/src/main/java/com/compguide/web/Persistence/Entities/WaitingTime.java
antoniosilva9116/CompGuide-Web
97105fcd99ab8d7e8bc43403384e0766f853a004
[ "Apache-2.0" ]
null
null
null
CompGuide-Web/CompGuide-Web/src/main/java/com/compguide/web/Persistence/Entities/WaitingTime.java
antoniosilva9116/CompGuide-Web
97105fcd99ab8d7e8bc43403384e0766f853a004
[ "Apache-2.0" ]
2
2021-06-03T23:55:38.000Z
2021-06-04T01:00:06.000Z
CompGuide-Web/CompGuide-Web/src/main/java/com/compguide/web/Persistence/Entities/WaitingTime.java
antoniosilva9116/CompGuide-Web
97105fcd99ab8d7e8bc43403384e0766f853a004
[ "Apache-2.0" ]
null
null
null
34.084337
237
0.70714
7,437
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package com.compguide.web.Persistence.Entities; import java.io.Serializable; import java.util.List; import javax.persistence.Basic; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; import javax.persistence.NamedQueries; import javax.persistence.NamedQuery; import javax.persistence.OneToMany; import javax.persistence.Table; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; /** * * @author António */ @Entity @Table(name = "waitingtime") @XmlRootElement @NamedQueries({ @NamedQuery(name = "WaitingTime.findAll", query = "SELECT w FROM WaitingTime w"), @NamedQuery(name = "WaitingTime.findByWaitingTimeID", query = "SELECT w FROM WaitingTime w WHERE w.waitingTimeID = :waitingTimeID"), @NamedQuery(name = "WaitingTime.findByExactWaitingTime", query = "SELECT w FROM WaitingTime w WHERE w.exactWaitingTime = :exactWaitingTime"), @NamedQuery(name = "WaitingTime.findByMinWaitingTime", query = "SELECT w FROM WaitingTime w WHERE w.minWaitingTime = :minWaitingTime"), @NamedQuery(name = "WaitingTime.findByMaxWaitingTime", query = "SELECT w FROM WaitingTime w WHERE w.maxWaitingTime = :maxWaitingTime"), @NamedQuery(name = "WaitingTime.findByMinMaxWaitingTimeAndTemporalUnitID", query = "SELECT w FROM WaitingTime w WHERE w.minWaitingTime = :minWaitingTime AND w.maxWaitingTime = :maxWaitingTime AND w.temporalUnitID = :temporalUnitID"), @NamedQuery(name = "WaitingTime.findByExactWaitingTimeAndTemporalUnitID", query = "SELECT w FROM WaitingTime w WHERE w.exactWaitingTime = :exactWaitingTime AND w.temporalUnitID = :temporalUnitID")}) public class WaitingTime implements 
Serializable { private static final long serialVersionUID = 1L; @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Basic(optional = false) @Column(name = "WaitingTimeID") private Integer waitingTimeID; // @Max(value=?) @Min(value=?)//if you know range of your decimal fields consider using these annotations to enforce field validation @Column(name = "ExactWaitingTime") private Double exactWaitingTime; @Column(name = "MinWaitingTime") private Double minWaitingTime; @Column(name = "MaxWaitingTime") private Double maxWaitingTime; @OneToMany(mappedBy = "waitingTimeID") private List<TemporalElement> temporalElementList; @JoinColumn(name = "TemporalUnitID", referencedColumnName = "TemporalUnitID") @ManyToOne(optional = false) private TemporalUnit temporalUnitID; public WaitingTime() { } public WaitingTime(Integer waitingTimeID) { this.waitingTimeID = waitingTimeID; } public WaitingTime(Double minWaitingTime, Double maxWaitingTime) { this.minWaitingTime = minWaitingTime; this.maxWaitingTime = maxWaitingTime; } public WaitingTime(Double exactWaitingTime) { this.exactWaitingTime = exactWaitingTime; } public Integer getWaitingTimeID() { return waitingTimeID; } public void setWaitingTimeID(Integer waitingTimeID) { this.waitingTimeID = waitingTimeID; } public Double getExactWaitingTime() { return exactWaitingTime; } public void setExactWaitingTime(Double exactWaitingTime) { this.exactWaitingTime = exactWaitingTime; } public Double getMinWaitingTime() { return minWaitingTime; } public void setMinWaitingTime(Double minWaitingTime) { this.minWaitingTime = minWaitingTime; } public Double getMaxWaitingTime() { return maxWaitingTime; } public void setMaxWaitingTime(Double maxWaitingTime) { this.maxWaitingTime = maxWaitingTime; } @XmlTransient public List<TemporalElement> getTemporalElementList() { return temporalElementList; } public void setTemporalElementList(List<TemporalElement> temporalElementList) { this.temporalElementList = temporalElementList; } public TemporalUnit 
getTemporalUnitID() { return temporalUnitID; } public void setTemporalUnitID(TemporalUnit temporalUnitID) { this.temporalUnitID = temporalUnitID; } public boolean asExactValue() { if (exactWaitingTime != null) { return true; } return false; } public boolean asInterval() { if (minWaitingTime != null && maxWaitingTime != null) { return true; } return false; } @Override public int hashCode() { int hash = 0; hash += (waitingTimeID != null ? waitingTimeID.hashCode() : 0); return hash; } @Override public boolean equals(Object object) { // TODO: Warning - this method won't work in the case the id fields are not set if (!(object instanceof WaitingTime)) { return false; } WaitingTime other = (WaitingTime) object; if ((this.waitingTimeID == null && other.waitingTimeID != null) || (this.waitingTimeID != null && !this.waitingTimeID.equals(other.waitingTimeID))) { return false; } return true; } @Override public String toString() { return "com.compguide.web.Persistence.Entities.WaitingTime[ waitingTimeID=" + waitingTimeID + " ]"; } }
3e119fcd057af6317b68be08c59d07bf7ab728ca
1,366
java
Java
tag-main/test/com/granule/tag/utils/PathUtilsTest.java
arammal/Granule
5f185d4896d79ff8033162ae2b478324b31fcfd7
[ "Apache-2.0" ]
25
2015-01-12T07:46:53.000Z
2020-05-15T17:30:54.000Z
tag-main/src/test/java/com/granule/tag/utils/PathUtilsTest.java
digaobarbosa/Granule
9ce6844b6bbc0f9ec440fa3a96e4ab32cf2fe53b
[ "Apache-2.0" ]
9
2015-03-17T10:41:55.000Z
2021-08-06T07:10:58.000Z
tag-main/src/test/java/com/granule/tag/utils/PathUtilsTest.java
digaobarbosa/Granule
9ce6844b6bbc0f9ec440fa3a96e4ab32cf2fe53b
[ "Apache-2.0" ]
24
2015-03-12T14:15:36.000Z
2021-12-13T23:59:31.000Z
31.045455
80
0.65959
7,438
/* * Copyright 2010 Granule Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.granule.tag.utils; import com.granule.utils.PathUtils; import junit.framework.TestCase; public class PathUtilsTest extends TestCase { public void testSlash() { testClean("/aa", "/aa"); testClean("./aa", "./aa"); testClean("c:/aa", "c:/aa"); testClean("c:\\aa//", "c:/aa/"); testClean("c:/aa/.\\", "c:/aa/"); } public void testBack() { testClean("c:/aa/..\\", "c:/"); testClean("css/../images/yolko.jpg","images/yolko.jpg"); testClean(".\\css/../images/yolko.jpg","./images/yolko.jpg"); testClean("/css/../images/yolko.jpg","/images/yolko.jpg"); testClean("/\\aa/bbb/./ccc/..\\../ddd//fff", "/aa/ddd/fff"); } private void testClean(String in, String out) { assertEquals(PathUtils.clean(in), out); } }
3e11a0c195152f88edf7c228730c99e95b90f666
3,101
java
Java
core/contract/src/main/java/org/eclipse/dataspaceconnector/contract/ContractServiceExtension.java
ma3u/DataSpaceConnector
7a0156449e357f96a488f426e5a3a3d62ac69495
[ "Apache-2.0" ]
null
null
null
core/contract/src/main/java/org/eclipse/dataspaceconnector/contract/ContractServiceExtension.java
ma3u/DataSpaceConnector
7a0156449e357f96a488f426e5a3a3d62ac69495
[ "Apache-2.0" ]
null
null
null
core/contract/src/main/java/org/eclipse/dataspaceconnector/contract/ContractServiceExtension.java
ma3u/DataSpaceConnector
7a0156449e357f96a488f426e5a3a3d62ac69495
[ "Apache-2.0" ]
null
null
null
33.706522
127
0.707191
7,439
/* * Copyright (c) 2021 Daimler TSS GmbH * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0 * * SPDX-License-Identifier: Apache-2.0 * * Contributors: * Daimler TSS GmbH - Initial API and Implementation * */ package org.eclipse.dataspaceconnector.contract; import org.eclipse.dataspaceconnector.spi.asset.AssetIndex; import org.eclipse.dataspaceconnector.spi.contract.ContractOfferFramework; import org.eclipse.dataspaceconnector.spi.contract.ContractOfferService; import org.eclipse.dataspaceconnector.spi.monitor.Monitor; import org.eclipse.dataspaceconnector.spi.system.ServiceExtension; import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext; import java.util.Set; public class ContractServiceExtension implements ServiceExtension { private static final String NAME = "Core Contract Service Extension"; private static final String[] PROVIDES = { ContractOfferService.class.getName() }; private Monitor monitor; @Override public final Set<String> provides() { return Set.of(PROVIDES); } @Override public final Set<String> requires() { return Set.of(AssetIndex.FEATURE); } @Override public void initialize(ServiceExtensionContext serviceExtensionContext) { monitor = serviceExtensionContext.getMonitor(); registerServices(serviceExtensionContext); monitor.info(String.format("Initialized %s", NAME)); } @Override public void start() { monitor.info(String.format("Started %s", NAME)); } @Override public void shutdown() { monitor.info(String.format("Shutdown %s", NAME)); } private void registerServices(ServiceExtensionContext serviceExtensionContext) { AssetIndex assetIndex = serviceExtensionContext.getService(AssetIndex.class, true); if (assetIndex == null) { monitor.warning("No AssetIndex registered. 
Register one to create Contract Offers."); assetIndex = new NullAssetIndex(); } ContractOfferFramework contractOfferFramework = serviceExtensionContext.getService(ContractOfferFramework.class, true); if (contractOfferFramework == null) { monitor.warning("No ContractOfferFramework registered. Register one to create Contract Offers."); contractOfferFramework = new NullContractOfferFramework(); } /* * Contract offer service calculates contract offers using a variety of contract offer frameworks * ad the given asset index. */ final ContractOfferService contractOfferService = new ContractOfferServiceImpl( contractOfferFramework, assetIndex ); /* * Register the just created contract offer service to the service extension context. */ serviceExtensionContext.registerService(ContractOfferService.class, contractOfferService); } }
3e11a15c3624c421da7c20e64d1c897ba3b3d189
1,475
java
Java
java/signature/src/com/test/signature/tests/TypeContextTests.java
calestar/experiments
6bebf93706334d601694ba51bf4011f788757ff8
[ "MIT" ]
1
2021-11-03T02:23:13.000Z
2021-11-03T02:23:13.000Z
java/signature/src/com/test/signature/tests/TypeContextTests.java
calestar/experiments
6bebf93706334d601694ba51bf4011f788757ff8
[ "MIT" ]
4
2021-08-31T22:13:34.000Z
2022-02-14T03:36:40.000Z
java/signature/src/com/test/signature/tests/TypeContextTests.java
calestar/experiments
6bebf93706334d601694ba51bf4011f788757ff8
[ "MIT" ]
null
null
null
26.818182
93
0.705763
7,440
/*** * Copyright (c) 2020, 2021 Jean-Sebastien Gelinas, see LICENSE at the root of the repository ***/ package com.test.signature.tests; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import org.junit.jupiter.api.Test; import com.test.signature.TypeContext; class TypeContextTests { @Test void test_empty_resolver() { TypeContext resolver = new TypeContext(); assertNull(resolver.resolveType("anything")); } @Test void test_one_result() { TypeContext resolver = new TypeContext(); resolver.register("something", "x.y.z"); resolver.register("else", "x.y.z"); String[][] results = resolver.resolveType("something"); assertEquals(1, results.length); String[] result = results[0]; assertEquals(2, result.length); assertEquals("x.y.z", result[0]); assertEquals("something", result[1]); } @Test void test_multiple_result() { TypeContext resolver = new TypeContext(); resolver.register("something", "x.y.z"); resolver.register("something", "a.b.c"); resolver.register("else", "x.y.z"); String[][] results = resolver.resolveType("something"); assertEquals(2, results.length); String[] result = results[0]; assertEquals(2, result.length); assertEquals("x.y.z", result[0]); assertEquals("something", result[1]); result = results[1]; assertEquals(2, result.length); assertEquals("a.b.c", result[0]); assertEquals("something", result[1]); } }
3e11a1a34b267b366a652b96166ba7913642e478
1,898
java
Java
app/src/main/java/com/citytogo/jonnyhsia/rxevangelist/helper/Kits.java
jonnyhsia/rx-evangelist
cca72f405771cc45151d51849c270ccdb593d1cf
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/citytogo/jonnyhsia/rxevangelist/helper/Kits.java
jonnyhsia/rx-evangelist
cca72f405771cc45151d51849c270ccdb593d1cf
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/citytogo/jonnyhsia/rxevangelist/helper/Kits.java
jonnyhsia/rx-evangelist
cca72f405771cc45151d51849c270ccdb593d1cf
[ "Apache-2.0" ]
null
null
null
27.911765
103
0.632771
7,441
package com.citytogo.jonnyhsia.rxevangelist.helper; import android.content.Context; import android.content.res.Configuration; import android.net.ConnectivityManager; import android.net.NetworkInfo; import android.support.annotation.NonNull; import com.citytogo.jonnyhsia.rxevangelist.App; import java.util.Random; import java.util.regex.Pattern; /** * Created by JonnyHsia on 17/10/22. * 工具集 */ public class Kits { /** * 判断网络是否连接 * * @return true 为连接, false 未连接 */ public static boolean checkNetwork() { ConnectivityManager connectivityManager = (ConnectivityManager) App.getInstance().getSystemService(Context.CONNECTIVITY_SERVICE); NetworkInfo info = null; if (connectivityManager != null) { info = connectivityManager.getActiveNetworkInfo(); } return info != null && info.isConnected(); } public static class UI { public static boolean isScreenOrientationPortrait(@NonNull Context context) { return context.getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT; } } /** * 去除空格空行 */ public static String deleteBlankSpace(String input) { return input.replaceAll("\\s*|\t|\r|\n", ""); } /** * 去除空格空行 */ public static String deleteBlankline(String input) { return input.replace("((\r\n)|\n)[\\s\t ]*(\\1)+", "$1"); } public static String generateRandomString() { String base = "兔狗科技爪哇随机用户名"; Random random = new Random(); int randomLength = Math.max(5, random.nextInt(base.length())); StringBuilder builder = new StringBuilder(); for (int i = 0; i < randomLength; i++) { builder.append(base.charAt(random.nextInt(base.length()))); } return builder.toString(); } }
3e11a2b4c6bc3e70c44691b1d19fda44cd4e64b8
2,548
java
Java
src/main/java/pl/uam/wmi/niezbednikstudenta/controllers/AuthorizeController.java
Adiiks/niezbednik-studenta-backend
97a8df20e383a83c6510889a10fb47852bfa3485
[ "MIT" ]
null
null
null
src/main/java/pl/uam/wmi/niezbednikstudenta/controllers/AuthorizeController.java
Adiiks/niezbednik-studenta-backend
97a8df20e383a83c6510889a10fb47852bfa3485
[ "MIT" ]
null
null
null
src/main/java/pl/uam/wmi/niezbednikstudenta/controllers/AuthorizeController.java
Adiiks/niezbednik-studenta-backend
97a8df20e383a83c6510889a10fb47852bfa3485
[ "MIT" ]
null
null
null
37.470588
146
0.759419
7,442
package pl.uam.wmi.niezbednikstudenta.controllers; import org.json.simple.JSONObject; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.server.ResponseStatusException; import pl.uam.wmi.niezbednikstudenta.exceptions.LoginException; import pl.uam.wmi.niezbednikstudenta.services.AuthorizeService; import java.io.IOException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; @RestController @RequestMapping("/authorization") public class AuthorizeController { private final AuthorizeService authorizeService; public AuthorizeController(AuthorizeService authorizeService) { this.authorizeService = authorizeService; } @GetMapping("/request-token") public JSONObject getAuthorizeUrlWithToken(@RequestParam(required = false, defaultValue = "oob") String oauthCallback) { try { return authorizeService.callRequestToken(oauthCallback); } catch (IOException | InvalidKeyException | NoSuchAlgorithmException | InterruptedException e) { throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error during request token", e); } } @GetMapping("/access-token") public JSONObject getAccessToken(@RequestParam String oauthToken, @RequestParam String oauthTokenSecret, @RequestParam String oauthVerifier) { try { return authorizeService.callAccessToken(oauthToken, oauthTokenSecret, oauthVerifier); } catch (IOException | InvalidKeyException | NoSuchAlgorithmException | InterruptedException e) { throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error during calling access token", e); } catch (LoginException e) { throw new ResponseStatusException(HttpStatus.BAD_REQUEST, e.getMessage()); } } @GetMapping("/logout") public String logOut(@RequestParam String oauthToken, 
@RequestParam String oauthTokenSecret) { try { return authorizeService.logOut(oauthToken, oauthTokenSecret); } catch (IOException | InvalidKeyException | NoSuchAlgorithmException | InterruptedException e) { throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error during logout", e); } } }
3e11a2fdd848bb261f7c267e9c0e80453eef5b4a
253,145
java
Java
engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
edersonbrilhante/cloudstack
f550d703053fb3ccf9df739cee31ab823d132f3a
[ "Apache-2.0" ]
null
null
null
engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
edersonbrilhante/cloudstack
f550d703053fb3ccf9df739cee31ab823d132f3a
[ "Apache-2.0" ]
2
2021-05-18T20:52:05.000Z
2021-08-25T15:56:31.000Z
engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
edersonbrilhante/cloudstack
f550d703053fb3ccf9df739cee31ab823d132f3a
[ "Apache-2.0" ]
null
null
null
49.36525
279
0.616145
7,443
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.vm; import java.net.URI; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import 
org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.Outcome; import org.apache.cloudstack.framework.jobs.dao.VmWorkJobDao; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.JobSerializerHelper; import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageDispatcher; import org.apache.cloudstack.framework.messagebus.MessageHandler; import org.apache.cloudstack.jobs.JobInfo; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; import com.cloud.agent.api.Answer; import com.cloud.agent.api.AttachOrDettachConfigDriveCommand; import com.cloud.agent.api.CheckVirtualMachineAnswer; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer; import com.cloud.agent.api.ClusterVMMetaDataSyncCommand; import 
com.cloud.agent.api.Command; import com.cloud.agent.api.MigrateCommand; import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PlugNicAnswer; import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PrepareForMigrationCommand; import com.cloud.agent.api.RebootAnswer; import com.cloud.agent.api.RebootCommand; import com.cloud.agent.api.ReplugNicAnswer; import com.cloud.agent.api.ReplugNicCommand; import com.cloud.agent.api.RestoreVMSnapshotAnswer; import com.cloud.agent.api.RestoreVMSnapshotCommand; import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.GPUDeviceTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.manager.Commands; import com.cloud.agent.manager.allocator.HostAllocator; import com.cloud.alert.AlertManager; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; 
import com.cloud.deploy.DeploymentPlanningManager; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.exception.AffinityConflictException; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.InsufficientVirtualNetworkCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.ha.HighAvailabilityManager.WorkType; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.network.Network; import com.cloud.network.NetworkModel; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.router.VirtualRouter; import com.cloud.offering.DiskOffering; import com.cloud.offering.DiskOfferingInfo; import com.cloud.offering.ServiceOffering; import com.cloud.org.Cluster; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import 
com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.DateUtil; import com.cloud.utils.Journal; import com.cloud.utils.Pair; import com.cloud.utils.Predicate; import com.cloud.utils.ReflectionUse; import com.cloud.utils.StringUtils; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.ItWorkVO.Step; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.PowerState; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshotManager; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.common.base.Strings; public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable { private static final 
Logger s_logger = Logger.getLogger(VirtualMachineManagerImpl.class);

// Dispatcher name under which this manager registers as a VM work-job handler.
public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName();

// Alert subject used when an out-of-band VM power-state change is detected.
private static final String VM_SYNC_ALERT_SUBJECT = "VM state sync alert";

// ----- Injected collaborators: orchestration services, DAOs and managers -----
@Inject private DataStoreManager dataStoreMgr;
@Inject private NetworkOrchestrationService _networkMgr;
@Inject private NetworkModel _networkModel;
@Inject private AgentManager _agentMgr;
@Inject private VMInstanceDao _vmDao;
@Inject private ServiceOfferingDao _offeringDao;
@Inject private DiskOfferingDao _diskOfferingDao;
@Inject private VMTemplateDao _templateDao;
@Inject private ItWorkDao _workDao;
@Inject private UserVmDao _userVmDao;
@Inject private UserVmService _userVmService;
@Inject private CapacityManager _capacityMgr;
@Inject private NicDao _nicsDao;
@Inject private HostDao _hostDao;
@Inject private AlertManager _alertMgr;
// NOTE(review): GuestOSCategoryDao/GuestOSDao are injected twice (also as
// _guestOSCategoryDao/_guestOSDao further down); kept as-is, consolidation candidate.
@Inject private GuestOSCategoryDao _guestOsCategoryDao;
@Inject private GuestOSDao _guestOsDao;
@Inject private VolumeDao _volsDao;
@Inject private HighAvailabilityManager _haMgr;
@Inject private HostPodDao _podDao;
@Inject private DataCenterDao _dcDao;
@Inject private ClusterDao _clusterDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private HypervisorGuruManager _hvGuruMgr;
@Inject private NetworkDao _networkDao;
@Inject private StoragePoolHostDao _poolHostDao;
@Inject private VMSnapshotDao _vmSnapshotDao;
@Inject private AffinityGroupVMMapDao _affinityGroupVMMapDao;
@Inject private EntityManager _entityMgr;
@Inject private GuestOSCategoryDao _guestOSCategoryDao;
@Inject private GuestOSDao _guestOSDao;
@Inject private ServiceOfferingDao _serviceOfferingDao;
@Inject private CAManager caManager;
@Inject private ResourceManager _resourceMgr;
@Inject private VMSnapshotManager _vmSnapshotMgr;
@Inject private ClusterDetailsDao _clusterDetailsDao;
@Inject private UserVmDetailsDao userVmDetailsDao;
@Inject private ConfigurationDao _configDao;
@Inject private VolumeOrchestrationService volumeMgr;
@Inject private DeploymentPlanningManager _dpMgr;
@Inject private MessageBus _messageBus;
@Inject private VirtualMachinePowerStateSync _syncMgr;
@Inject private VmWorkJobDao _workJobDao;
@Inject private AsyncJobManager _jobMgr;

// Proxy that dispatches serialized VmWork job payloads onto this class's handlers.
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);

// Per-VM-type gurus, populated through registerGuru(); access is synchronized on the map.
Map<VirtualMachine.Type, VirtualMachineGuru> _vmGurus = new HashMap<VirtualMachine.Type, VirtualMachineGuru>();

// State machine that drives VM lifecycle transitions (used by stateTransitTo()).
protected StateMachine2<State, VirtualMachine.Event, VirtualMachine> _stateMachine;

// ----- Global configuration keys (tunables) -----
static final ConfigKey<Integer> StartRetry = new ConfigKey<Integer>("Advanced", Integer.class, "start.retry", "10",
        "Number of times to retry create and start commands", true);
static final ConfigKey<Integer> VmOpWaitInterval = new ConfigKey<Integer>("Advanced", Integer.class, "vm.op.wait.interval", "120",
        "Time (in seconds) to wait before checking if a previous operation has succeeded", true);
static final ConfigKey<Integer> VmOpLockStateRetry = new ConfigKey<Integer>("Advanced", Integer.class, "vm.op.lock.state.retry", "5",
        "Times to retry locking the state of a VM for operations, -1 means forever", true);
static final ConfigKey<Long> VmOpCleanupInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.op.cleanup.interval", "86400",
        "Interval to run the thread that cleans up the vm operations (in seconds)", false);
static final ConfigKey<Long> VmOpCleanupWait = new ConfigKey<Long>("Advanced", Long.class, "vm.op.cleanup.wait", "3600",
        "Time (in seconds) to wait before cleanuping up any vm work items", true);
static final ConfigKey<Long> VmOpCancelInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.op.cancel.interval", "3600",
        "Time (in seconds) to wait before cancelling a operation", false);
static final ConfigKey<Boolean> VmDestroyForcestop = new ConfigKey<Boolean>("Advanced", Boolean.class, "vm.destroy.forcestop", "false",
        "On destroy, force-stop takes this value ", true);
static final ConfigKey<Integer> ClusterDeltaSyncInterval = new
ConfigKey<Integer>("Advanced", Integer.class, "sync.interval", "60", "Cluster Delta sync interval in seconds", false);
static final ConfigKey<Integer> ClusterVMMetaDataSyncInterval = new ConfigKey<Integer>("Advanced", Integer.class, "vmmetadata.sync.interval", "180",
        "Cluster VM metadata sync interval in seconds", false);
static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.job.check.interval", "3000",
        "Interval in milliseconds to check if the job is complete", false);
static final ConfigKey<Long> VmJobTimeout = new ConfigKey<Long>("Advanced", Long.class, "vm.job.timeout", "600000",
        "Time in milliseconds to wait before attempting to cancel a job", false);
static final ConfigKey<Integer> VmJobStateReportInterval = new ConfigKey<Integer>("Advanced", Integer.class, "vm.job.report.interval", "60",
        "Interval to send application level pings to make sure the connection is still working", false);
static final ConfigKey<Boolean> HaVmRestartHostUp = new ConfigKey<Boolean>("Advanced", Boolean.class, "ha.vm.restart.hostup", "true",
        "If an out-of-band stop of a VM is detected and its host is up, then power on the VM", true);

// Scheduler for the periodic cleanup/transition tasks; created in configure().
ScheduledExecutorService _executor = null;
// Id of this management-server node; used to claim and clean up work items.
private long _nodeId;
// Allocator extension points, wired in by the component framework.
private List<StoragePoolAllocator> _storagePoolAllocators;
private List<HostAllocator> hostAllocators;

public List<HostAllocator> getHostAllocators() {
    return hostAllocators;
}

public void setHostAllocators(final List<HostAllocator> hostAllocators) {
    this.hostAllocators = hostAllocators;
}

/**
 * Registers the guru responsible for VMs of the given type.
 */
@Override
public void registerGuru(final VirtualMachine.Type type, final VirtualMachineGuru guru) {
    synchronized (_vmGurus) {
        _vmGurus.put(type, guru);
    }
}

/**
 * Allocates the database entries (VM row, nics, root/data volumes) for a new VM
 * prior to its first start. Nic and volume allocation run in one transaction.
 */
@Override
@DB
public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
        final DiskOfferingInfo rootDiskOfferingInfo, final List<DiskOfferingInfo> dataDiskOfferings,
        final LinkedHashMap<? extends Network, List<?
extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan,
        final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions,
        final Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException {
    final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
    final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Allocating entries for VM: " + vm);
    }
    // Pin the VM to the planned zone (and pod, when the plan carries one).
    vm.setDataCenterId(plan.getDataCenterId());
    if (plan.getPodId() != null) {
        vm.setPodIdToDeployIn(plan.getPodId());
    }
    assert plan.getClusterId() == null && plan.getPoolId() == null : "We currently don't support cluster and pool preset yet";
    final VMInstanceVO vmFinal = _vmDao.persist(vm);
    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null);
    // Nics and volumes are allocated atomically: a failure rolls everything back.
    Transaction.execute(new TransactionCallbackWithExceptionNoReturn<InsufficientCapacityException>() {
        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Allocating nics for " + vmFinal);
            }
            try {
                if (!vmProfile.getBootArgs().contains("ExternalLoadBalancerVm")) {
                    _networkMgr.allocate(vmProfile, auxiliaryNetworks, extraDhcpOptions);
                }
            } catch (final ConcurrentOperationException e) {
                throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e);
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Allocating disks for " + vmFinal);
            }
            if (template.getFormat() == ImageFormat.ISO) {
                // ISO boot: ROOT disk is a raw volume sized from the root disk offering.
                volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
                        rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
            } else if (template.getFormat() == ImageFormat.BAREMETAL) {
                // Do nothing
            } else {
                // Template boot: ROOT disk is derived from the template image.
                volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
                        rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner);
            }
            if (dataDiskOfferings != null) {
                for (final DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) {
                    volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
                            dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
                }
            }
            // Datadisk templates (e.g. OVA-bundled disks) each get their own volume.
            if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) {
                int diskNumber = 1;
                for (Entry<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap : datadiskTemplateToDiskOfferingMap.entrySet()) {
                    DiskOffering diskOffering = dataDiskTemplateToDiskOfferingMap.getValue();
                    // Converts the offering's disk size to GiB units — assumes getDiskSize()
                    // is in bytes; TODO confirm against DiskOffering contract.
                    long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024);
                    VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey());
                    volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId() + "-" + String.valueOf(diskNumber), diskOffering, diskOfferingSize, null, null,
                            vmFinal, dataDiskTemplate, owner, Long.valueOf(diskNumber));
                    diskNumber++;
                }
            }
        }
    });
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Allocation completed for VM: " + vmFinal);
    }
}

/**
 * Convenience overload: allocates a VM whose ROOT disk comes from the service
 * offering, with no extra data disks, DHCP options or datadisk templates.
 */
@Override
public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
        final LinkedHashMap<? extends Network, List<?
extends NicProfile>> networks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException {
    allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType, null, null);
}

// Returns the guru registered for this VM's type, or null when vm is null.
private VirtualMachineGuru getVmGuru(final VirtualMachine vm) {
    if(vm != null) {
        return _vmGurus.get(vm.getType());
    }
    return null;
}

/**
 * Expunges the VM, converting checked timeout/concurrency failures into
 * CloudRuntimeException for callers that cannot handle them.
 */
@Override
public void expunge(final String vmUuid) throws ResourceUnavailableException {
    try {
        advanceExpunge(vmUuid);
    } catch (final OperationTimedoutException e) {
        throw new CloudRuntimeException("Operation timed out", e);
    } catch (final ConcurrentOperationException e) {
        throw new CloudRuntimeException("Concurrent operation ", e);
    }
}

@Override
public void advanceExpunge(final String vmUuid) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    advanceExpunge(vm);
}

/**
 * Stops (if needed) and fully expunges a VM: transitions it to Expunging,
 * cleans up nics, managed-storage targets and volumes, and runs the
 * hypervisor-guru finalization hooks. No-op when the VM is already removed.
 */
protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    if (vm == null || vm.getRemoved() != null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find vm or vm is destroyed: " + vm);
        }
        return;
    }
    // Make sure the VM is stopped, then reload the row to pick up the fresh state.
    advanceStop(vm.getUuid(), false);
    vm = _vmDao.findByUuid(vm.getUuid());
    try {
        if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
            s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
            throw new CloudRuntimeException("Unable to destroy " + vm);
        }
    } catch (final NoTransitionException e) {
        s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
        throw new CloudRuntimeException("Unable to destroy " + vm, e);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Destroying vm " + vm);
    }
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
    s_logger.debug("Cleaning up NICS");
    // Collect hypervisor-specific nic-expunge commands BEFORE the nics are removed.
    final List<Command> nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics());
    _networkMgr.cleanupNics(profile);
    s_logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage");
    final List<Command> volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm);
    // Prefer the current host; fall back to the last host the VM ran on.
    final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
    List<Map<String, String>> targets = getTargets(hostId, vm.getId());
    if (volumeExpungeCommands != null && volumeExpungeCommands.size() > 0 && hostId != null) {
        final Commands cmds = new Commands(Command.OnError.Stop);
        for (final Command volumeExpungeCommand : volumeExpungeCommands) {
            cmds.addCommand(volumeExpungeCommand);
        }
        _agentMgr.send(hostId, cmds);
        if (!cmds.isSuccessful()) {
            for (final Answer answer : cmds.getAnswers()) {
                if (!answer.getResult()) {
                    s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
                    throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
                }
            }
        }
    }
    if (hostId != null) {
        volumeMgr.revokeAccess(vm.getId(), hostId);
    }
    // Clean up volumes based on the vm's instance id
    volumeMgr.cleanupVolumes(vm.getId());
    if (hostId != null && CollectionUtils.isNotEmpty(targets)) {
        removeDynamicTargets(hostId, targets);
    }
    final VirtualMachineGuru guru = getVmGuru(vm);
    guru.finalizeExpunge(vm);
    //remove the overcommit details from the uservm details
    userVmDetailsDao.removeDetails(vm.getId());
    // send hypervisor-dependent commands before removing
    final List<Command> finalizeExpungeCommands = hvGuru.finalizeExpunge(vm);
    if (finalizeExpungeCommands != null && finalizeExpungeCommands.size() > 0) {
        if (hostId != null) {
            final Commands cmds = new Commands(Command.OnError.Stop);
            for (final Command command : finalizeExpungeCommands) {
                cmds.addCommand(command);
            }
            if (nicExpungeCommands != null) {
                for (final Command command : nicExpungeCommands) {
cmds.addCommand(command); } } _agentMgr.send(hostId, cmds); if (!cmds.isSuccessful()) { for (final Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { s_logger.warn("Failed to expunge vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails()); } } } } } if (s_logger.isDebugEnabled()) { s_logger.debug("Expunged " + vm); } } private List<Map<String, String>> getTargets(Long hostId, long vmId) { List<Map<String, String>> targets = new ArrayList<>(); HostVO hostVO = _hostDao.findById(hostId); if (hostVO == null || hostVO.getHypervisorType() != HypervisorType.VMware) { return targets; } List<VolumeVO> volumes = _volsDao.findByInstance(vmId); if (CollectionUtils.isEmpty(volumes)) { return targets; } for (VolumeVO volume : volumes) { StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); if (storagePoolVO != null && storagePoolVO.isManaged()) { Map<String, String> target = new HashMap<>(); target.put(ModifyTargetsCommand.STORAGE_HOST, storagePoolVO.getHostAddress()); target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); target.put(ModifyTargetsCommand.IQN, volume.get_iScsiName()); targets.add(target); } } return targets; } private void removeDynamicTargets(long hostId, List<Map<String, String>> targets) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); cmd.setTargets(targets); cmd.setApplyToAllHostsInCluster(true); cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); sendModifyTargetsCommand(cmd, hostId); } private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; s_logger.warn(msg); } else if (!answer.getResult()) { String msg = "Unable to modify target on the following host: " + hostId; s_logger.warn(msg); } } @Override public 
boolean start() { // TODO, initial delay is hardcoded _executor.scheduleAtFixedRate(new CleanupTask(), 5, VmJobStateReportInterval.value(), TimeUnit.SECONDS); _executor.scheduleAtFixedRate(new TransitionTask(), VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS); cancelWorkItems(_nodeId); volumeMgr.cleanupStorageJobs(); // cleanup left over place holder works _workJobDao.expungeLeftoverWorkJobs(ManagementServerNode.getManagementServerId()); return true; } @Override public boolean stop() { return true; } @Override public boolean configure(final String name, final Map<String, Object> xmlParams) throws ConfigurationException { ReservationContextImpl.init(_entityMgr); VirtualMachineProfileImpl.init(_entityMgr); VmWorkMigrate.init(_entityMgr); _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Vm-Operations-Cleanup")); _nodeId = ManagementServerNode.getManagementServerId(); _agentMgr.registerForHostEvents(this, true, true, true); _messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this)); return true; } protected VirtualMachineManagerImpl() { setStateMachine(); } @Override public void start(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params) { start(vmUuid, params, null, null); } @Override public void start(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) { try { advanceStart(vmUuid, params, planToDeploy, planner); } catch (final ConcurrentOperationException e) { throw new CloudRuntimeException("Unable to start a VM due to concurrent operation", e).add(VirtualMachine.class, vmUuid); } catch (final InsufficientCapacityException e) { throw new CloudRuntimeException("Unable to start a VM due to insufficient capacity", e).add(VirtualMachine.class, vmUuid); } catch (final ResourceUnavailableException e) { if(e.getScope() != null && 
e.getScope().equals(VirtualRouter.class)){ throw new CloudRuntimeException("Network is unavailable. Please contact administrator", e).add(VirtualMachine.class, vmUuid); } throw new CloudRuntimeException("Unable to start a VM due to unavailable resources", e).add(VirtualMachine.class, vmUuid); } } protected boolean checkWorkItems(final VMInstanceVO vm, final State state) throws ConcurrentOperationException { while (true) { final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state); if (vo == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Unable to find work for VM: " + vm + " and state: " + state); } return true; } if (vo.getStep() == Step.Done) { if (s_logger.isDebugEnabled()) { s_logger.debug("Work for " + vm + " is " + vo.getStep()); } return true; } // also check DB to get latest VM state to detect vm update from concurrent process before idle waiting to get an early exit final VMInstanceVO instance = _vmDao.findById(vm.getId()); if (instance != null && instance.getState() == State.Running) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM is already started in DB: " + vm); } return true; } if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) { s_logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); return false; } try { Thread.sleep(VmOpWaitInterval.value()*1000); } catch (final InterruptedException e) { s_logger.info("Waiting for " + vm + " but is interrupted"); throw new ConcurrentOperationException("Waiting for " + vm + " but is interrupted"); } s_logger.debug("Waiting some more to make sure there's no activity on " + vm); } } @DB protected Ternary<VMInstanceVO, ReservationContext, ItWorkVO> changeToStartState(final VirtualMachineGuru vmGuru, final VMInstanceVO vm, final User caller, final Account account) throws ConcurrentOperationException { final long vmId = vm.getId(); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Starting, vm.getType(), 
vm.getId());
    int retry = VmOpLockStateRetry.value();
    while (retry-- != 0) {
        try {
            final ItWorkVO workFinal = work;
            // Persist the work item and attempt the Stopped -> Starting transition atomically.
            final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> result =
                    Transaction.execute(new TransactionCallbackWithException<Ternary<VMInstanceVO, ReservationContext, ItWorkVO>, NoTransitionException>() {
                        @Override
                        public Ternary<VMInstanceVO, ReservationContext, ItWorkVO> doInTransaction(final TransactionStatus status) throws NoTransitionException {
                            final Journal journal = new Journal.LogJournal("Creating " + vm, s_logger);
                            final ItWorkVO work = _workDao.persist(workFinal);
                            final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account);
                            if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) {
                                if (s_logger.isDebugEnabled()) {
                                    s_logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId());
                                }
                                return new Ternary<VMInstanceVO, ReservationContext, ItWorkVO>(vm, context, work);
                            }
                            // Transition refused: signal the retry loop via a null first element.
                            return new Ternary<VMInstanceVO, ReservationContext, ItWorkVO>(null, null, work);
                        }
                    });
            work = result.third();
            if (result.first() != null) {
                return result;
            }
        } catch (final NoTransitionException e) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Unable to transition into Starting state due to " + e.getMessage());
            }
        }
        final VMInstanceVO instance = _vmDao.findById(vmId);
        if (instance == null) {
            throw new ConcurrentOperationException("Unable to acquire lock on " + vm);
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". Retry=" + retry);
        }
        final State state = instance.getState();
        if (state == State.Running) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("VM is already started: " + vm);
            }
            return null;
        }
        if (state.isTransitional()) {
            // Another operation is in flight: wait on its work item or bail out.
            if (!checkWorkItems(vm, state)) {
                throw new ConcurrentOperationException("There are concurrent operations on " + vm);
            } else {
                continue;
            }
        }
        if (state != State.Stopped) {
            s_logger.debug("VM " + vm + " is not in a state to be started: " + state);
            return null;
        }
    }
    throw new ConcurrentOperationException("Unable to change the state of " + vm);
}

// FIXME: We should do this better.
protected <T extends VMInstanceVO> boolean changeState(final T vm, final Event event, final Long hostId, final ItWorkVO work, final Step step) throws NoTransitionException {
    Step previousStep = null;
    if (work != null) {
        previousStep = work.getStep();
        _workDao.updateStep(work, step);
    }
    boolean result = false;
    try {
        result = stateTransitTo(vm, event, hostId);
        return result;
    } finally {
        // Roll the work item back to its previous step when the transition failed.
        if (!result && work != null) {
            _workDao.updateStep(work, previousStep);
        }
    }
}

// True when the VM belongs to at least one affinity group.
protected boolean areAffinityGroupsAssociated(final VirtualMachineProfile vmProfile) {
    final VirtualMachine vm = vmProfile.getVirtualMachine();
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        return true;
    }
    return false;
}

@Override
public void advanceStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlanner planner)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    advanceStart(vmUuid, params, null, planner);
}

/**
 * Starts the VM either inline (when already executing inside a VM work job) or
 * by dispatching a start job through the work-job queue and waiting for its outcome.
 */
@Override
public void advanceStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    final AsyncJobExecutionContext jobContext =
AsyncJobExecutionContext.getCurrentExecutionContext(); if ( jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { orchestrateStart(vmUuid, params, planToDeploy, planner); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { final Outcome<VirtualMachine> outcome = startVmThroughJobQueue(vmUuid, params, planToDeploy, planner); try { final VirtualMachine vm = outcome.get(); } catch (final InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); } catch (final java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobResult != null) { if (jobResult instanceof ConcurrentOperationException) { throw (ConcurrentOperationException)jobResult; } else if (jobResult instanceof ResourceUnavailableException) { throw (ResourceUnavailableException)jobResult; } else if (jobResult instanceof InsufficientCapacityException) { throw (InsufficientCapacityException)jobResult; } else if (jobResult instanceof RuntimeException) { throw (RuntimeException)jobResult; } else if (jobResult instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobResult); } } } } private void setupAgentSecurity(final Host vmHost, final Map<String, String> sshAccessDetails, final VirtualMachine vm) throws AgentUnavailableException, OperationTimedoutException { final String csr = caManager.generateKeyStoreAndCsr(vmHost, sshAccessDetails); if (!Strings.isNullOrEmpty(csr)) { final Map<String, String> ipAddressDetails = new HashMap<>(sshAccessDetails); ipAddressDetails.remove(NetworkElementCommand.ROUTER_NAME); final Certificate certificate = caManager.issueCertificate(csr, Arrays.asList(vm.getHostName(), 
// Tail of setupAgentSecurity(): issue a certificate from the CSR and push it to the system VM
// over SSH; failures are logged but do not abort the start (caller retries).
vm.getInstanceName()), new ArrayList<>(ipAddressDetails.values()), CAManager.CertValidityPeriod.value(), null);
        final boolean result = caManager.deployCertificate(vmHost, certificate, false, sshAccessDetails);
        if (!result) {
            s_logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName());
        }
    } else {
        s_logger.error("Failed to setup keystore and generate CSR for system vm: " + vm.getInstanceName());
    }
}

/**
 * Drives the full start sequence for a VM: moves it into Starting state, plans a deployment
 * destination (honoring a caller-supplied plan and any READY root volume's location), prepares
 * network and storage, sends the StartCommand to the chosen host, and finalizes or cleans up
 * depending on the answer. Retries up to StartRetry times, excluding failed hosts via the
 * avoid list on each pass.
 *
 * @param vmUuid       UUID of the VM to start
 * @param params       optional profile parameters passed through to the VM profile
 * @param planToDeploy optional caller-pinned deployment plan (dc/pod/cluster/host/pool)
 * @param planner      optional specific deployment planner to use
 */
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    final CallContext cctxt = CallContext.current();
    final Account account = cctxt.getCallingAccount();
    final User caller = cctxt.getCallingUser();
    VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    final VirtualMachineGuru vmGuru = getVmGuru(vm);
    // Transition to the start state and obtain the reservation context plus work item; a null
    // return means another operation won the race and there is nothing for us to do.
    final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
    if (start == null) {
        return;
    }
    vm = start.first();
    final ReservationContext ctx = start.second();
    ItWorkVO work = start.third();
    VMInstanceVO startedVm = null;
    final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
    final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
    // Default plan targets the VM's own zone/pod; a caller-provided plan overrides it below.
    DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
    if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
        }
        plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
    }
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
    boolean canRetry = true;
    ExcludeList avoids = null;
    try {
        final Journal journal = start.second().getJournal();
        if (planToDeploy != null) {
            avoids = planToDeploy.getAvoids();
        }
        if (avoids == null) {
            avoids = new ExcludeList();
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
        }
        boolean planChangedByVolume = false;
        boolean reuseVolume = true;
        final DataCenterDeployment originalPlan = plan;
        int retry = StartRetry.value();
        while (retry-- != 0) { // It's != so that it can match -1.
            if (reuseVolume) {
                // edit plan if this vm's ROOT volume is in READY state already
                final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
                for (final VolumeVO vol : vols) {
                    // make sure if the templateId is unchanged. If it is changed,
                    // let planner
                    // reassign pool for the volume even if it ready.
                    final Long volTemplateId = vol.getTemplateId();
                    if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
                        if (s_logger.isDebugEnabled()) {
                            s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
                        }
                        continue;
                    }
                    final StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
                    if (!pool.isInMaintenance()) {
                        if (s_logger.isDebugEnabled()) {
                            s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
                        }
                        final long rootVolDcId = pool.getDataCenterId();
                        final Long rootVolPodId = pool.getPodId();
                        final Long rootVolClusterId = pool.getClusterId();
                        if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
                            final Long clusterIdSpecified = planToDeploy.getClusterId();
                            if (clusterIdSpecified != null && rootVolClusterId != null) {
                                if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
                                    // cannot satisfy the plan passed in to the
                                    // planner
                                    if (s_logger.isDebugEnabled()) {
                                        s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
                                    }
                                    throw new ResourceUnavailableException(
                                            "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
                                }
                            }
                            plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
                        } else {
                            plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
                            }
                            planChangedByVolume = true;
                        }
                    }
                }
            }
            final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
            final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
            DeployDestination dest = null;
            try {
                dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
            } catch (final AffinityConflictException e2) {
                s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
                throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
            }
            if (dest == null) {
                if (planChangedByVolume) {
                    // The volume-pinned plan failed; fall back to the original plan and stop
                    // trying to reuse the READY volume's cluster on subsequent retries.
                    plan = originalPlan;
                    planChangedByVolume = false;
                    //do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
                    reuseVolume = false;
                    continue;
                }
                throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
            }
            // NOTE(review): dest is provably non-null here (the null case throws/continues above),
            // so this check is redundant — kept as-is.
            if (dest != null) {
                avoids.addHost(dest.getHost().getId());
                journal.record("Deployment found ", vmProfile, dest);
            }
            long destHostId = dest.getHost().getId();
            vm.setPodIdToDeployIn(dest.getPod().getId());
            final Long cluster_id = dest.getCluster().getId();
            final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
            final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
            //storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
            if (userVmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
                userVmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
                userVmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
            } else if (userVmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
                userVmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
                userVmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
            }
            vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
            vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
            StartAnswer startAnswer = null;
            try {
                if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
                    throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine "+vm.getUuid()+" oldstate: "+vm.getState()+ "Event :"+Event.OperationRetry);
                }
            } catch (final NoTransitionException e1) {
                throw new ConcurrentOperationException(e1.getMessage());
            }
            try {
                // Prepare nics and (for non-baremetal) volumes before the StartCommand is sent.
                _networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null, dest.getStorageForDisks()), ctx);
                if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                    volumeMgr.prepare(vmProfile, dest);
                }
                //since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
                if (!reuseVolume) {
                    reuseVolume = true;
                }
                Commands cmds = null;
                vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
                final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
                handlePath(vmTO.getDisks(), vm.getHypervisorType());
                cmds = new Commands(Command.OnError.Stop);
                cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
                vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
                // Re-read the work item to detect concurrent modification before sending.
                work = _workDao.findById(work.getId());
                if (work == null || work.getStep() != Step.Prepare) {
                    throw new ConcurrentOperationException("Work steps have been changed: " + work);
                }
                _workDao.updateStep(work, Step.Starting);
                _agentMgr.send(destHostId, cmds);
                _workDao.updateStep(work, Step.Started);
                startAnswer = cmds.getAnswer(StartAnswer.class);
                if (startAnswer != null && startAnswer.getResult()) {
                    handlePath(vmTO.getDisks(), startAnswer.getIqnToData());
                    final String host_guid = startAnswer.getHost_guid();
                    if (host_guid != null) {
                        final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
                        if (finalHost == null) {
                            throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: "+startAnswer);
                        }
                        destHostId = finalHost.getId();
                    }
                    if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
                        syncDiskChainChange(startAnswer);
                        if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
                            s_logger.error("Unable to transition to a new state. VM uuid: "+vm.getUuid()+ "VM oldstate:"+vm.getState()+"Event:"+Event.OperationSucceeded);
                            throw new ConcurrentOperationException("Failed to deploy VM"+ vm.getUuid());
                        }
                        // Update GPU device capacity
                        final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
                        if (gpuDevice != null) {
                            _resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
                        }
                        // Remove the information on whether it was a deploy vm request.The deployvm=true information
                        // is set only when the vm is being deployed. When a vm is started from a stop state the
                        // information isn't set,
                        if (userVmDetailsDao.findDetail(vm.getId(), "deployvm") != null) {
                            userVmDetailsDao.removeDetail(vm.getId(), "deployvm");
                        }
                        startedVm = vm;
                        if (s_logger.isDebugEnabled()) {
                            s_logger.debug("Start completed for VM " + vm);
                        }
                        // For system VMs (console proxy / secondary storage) with CA provisioning
                        // enabled, secure the agent connection; retry up to 3 times before failing.
                        final Host vmHost = _hostDao.findById(destHostId);
                        if (vmHost != null && (VirtualMachine.Type.ConsoleProxy.equals(vm.getType()) || VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) && caManager.canProvisionCertificates()) {
                            final Map<String, String> sshAccessDetails = _networkMgr.getSystemVMAccessDetails(vm);
                            for (int retries = 3; retries > 0; retries--) {
                                try {
                                    setupAgentSecurity(vmHost, sshAccessDetails, vm);
                                    return;
                                } catch (final Exception e) {
                                    s_logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e);
                                }
                            }
                            throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId());
                        }
                        return;
                    } else {
                        // finalizeStart rejected the started VM: stop it again and abort the retry loop.
                        if (s_logger.isDebugEnabled()) {
                            // NOTE(review): logs at info level inside an isDebugEnabled() guard — confirm intended level.
                            s_logger.info("The guru did not like the answers so stopping " + vm);
                        }
                        StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
                        stopCmd.setControlIp(getControlNicIpForVM(vm));
                        final StopCommand cmd = stopCmd;
                        final Answer answer = _agentMgr.easySend(destHostId, cmd);
                        if (answer != null && answer instanceof StopAnswer) {
                            final StopAnswer stopAns = (StopAnswer)answer;
                            if (vm.getType() == VirtualMachine.Type.User) {
                                final String platform = stopAns.getPlatform();
                                if (platform != null) {
                                    final Map<String,String> vmmetadata = new HashMap<String,String>();
                                    vmmetadata.put(vm.getInstanceName(), platform);
                                    syncVMMetaData(vmmetadata);
                                }
                            }
                        }
                        if (answer == null || !answer.getResult()) {
                            s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
                            _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
                            throw new ExecutionException("Unable to stop this VM, "+vm.getUuid()+" so we are unable to retry the start operation");
                        }
                        throw new ExecutionException("Unable to start VM:"+vm.getUuid()+" due to error in finalizeStart, not retrying");
                    }
                }
                s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
                if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
                    break;
                }
            } catch (OperationTimedoutException e) {
                s_logger.debug("Unable to send the start command to host " + dest.getHost()+" failed to start VM: "+vm.getUuid());
                if (e.isActive()) {
                    _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
                }
                canRetry = false;
                throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
            } catch (final ResourceUnavailableException e) {
                s_logger.info("Unable to contact resource.", e);
                // If the failure cannot be translated into an avoid-list entry, rethrow.
                if (!avoids.add(e)) {
                    if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                        throw e;
                    } else {
                        s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
                        throw e;
                    }
                }
            } catch (final InsufficientCapacityException e) {
                s_logger.info("Insufficient capacity ", e);
                if (!avoids.add(e)) {
                    if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                        throw e;
                    } else {
                        s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
                    }
                }
            } catch (final ExecutionException e) {
                s_logger.error("Failed to start instance " + vm, e);
                throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
            } catch (final NoTransitionException e) {
                s_logger.error("Failed to start instance " + vm, e);
                throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
            } finally {
                // Roll back this attempt's resources whenever the VM did not come up and we may retry.
                if (startedVm == null && canRetry) {
                    final Step prevStep = work.getStep();
                    _workDao.updateStep(work, Step.Release);
                    // If previous step was started/ing && we got a valid answer
                    if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
                        //TODO check the response of cleanup and record it in DB for retry
                        cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
                    } else {
                        //if step is not starting/started, send cleanup command with force=true
                        cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
                    }
                }
            }
        }
    } finally {
        if (startedVm == null) {
            if (canRetry) {
                try {
                    changeState(vm, Event.OperationFailed, null, work, Step.Done);
                } catch (final NoTransitionException e) {
                    throw new ConcurrentOperationException(e.getMessage());
                }
            }
        }
        // Propagate the accumulated avoid set back to the caller's plan for its own retries.
        if (planToDeploy != null) {
            planToDeploy.setAvoids(avoids);
        }
    }
    if (startedVm == null) {
        throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
    }
}

// for managed storage on KVM, need to make sure the path field of the volume in question is populated with the IQN
private void handlePath(final DiskTO[] disks, final HypervisorType hypervisorType) {
    if (hypervisorType != HypervisorType.KVM) {
        return;
    }
    if (disks != null) {
        for (final DiskTO disk : disks) {
            final Map<String, String> details = disk.getDetails();
            final boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
            if (isManaged && disk.getPath() == null) {
                final Long volumeId = disk.getData().getId();
                final VolumeVO volume = _volsDao.findById(volumeId);
                disk.setPath(volume.get_iScsiName());
                if (disk.getData() instanceof VolumeObjectTO) {
                    final
VolumeObjectTO volTo = (VolumeObjectTO)disk.getData();
                    volTo.setPath(volume.get_iScsiName());
                }
                // Persist the IQN as the volume's path in the DB as well.
                volume.setPath(volume.get_iScsiName());
                _volsDao.update(volumeId, volume);
            }
        }
    }
}

// for managed storage on XenServer and VMware, need to update the DB with a path if the VDI/VMDK file was newly created
private void handlePath(final DiskTO[] disks, final Map<String, Map<String, String>> iqnToData) {
    if (disks != null && iqnToData != null) {
        for (final DiskTO disk : disks) {
            final Map<String, String> details = disk.getDetails();
            final boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
            if (isManaged) {
                // Managed volumes are keyed by iSCSI name in the start answer's path data.
                final Long volumeId = disk.getData().getId();
                final VolumeVO volume = _volsDao.findById(volumeId);
                final String iScsiName = volume.get_iScsiName();
                boolean update = false;
                final Map<String, String> data = iqnToData.get(iScsiName);
                if (data != null) {
                    final String path = data.get(StartAnswer.PATH);
                    if (path != null) {
                        volume.setPath(path);
                        update = true;
                    }
                    final String imageFormat = data.get(StartAnswer.IMAGE_FORMAT);
                    if (imageFormat != null) {
                        volume.setFormat(ImageFormat.valueOf(imageFormat));
                        update = true;
                    }
                    if (update) {
                        _volsDao.update(volumeId, volume);
                    }
                }
            }
        }
    }
}

// Pushes disk-chain info returned by a successful StartCommand back into the volume records,
// preferring the path reported in the answer and falling back to the DB copy when it is null.
private void syncDiskChainChange(final StartAnswer answer) {
    final VirtualMachineTO vmSpec = answer.getVirtualMachine();
    for (final DiskTO disk : vmSpec.getDisks()) {
        if (disk.getType() != Volume.Type.ISO) {
            final VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
            final VolumeVO volume = _volsDao.findById(vol.getId());
            // Use getPath() from VolumeVO to get a fresh copy of what's in the DB.
            // Before doing this, in a certain situation, getPath() from VolumeObjectTO
            // returned null instead of an actual path (because it was out of date with the DB).
            if(vol.getPath() != null) {
                volumeMgr.updateVolumeDiskChain(vol.getId(), vol.getPath(), vol.getChainInfo());
            } else {
                volumeMgr.updateVolumeDiskChain(vol.getId(), volume.getPath(), vol.getChainInfo());
            }
        }
    }
}

// Stops the VM identified by vmUuid without forcing cleanup; adapts checked agent/concurrency
// failures into the exception types declared by this interface method.
@Override
public void stop(final String vmUuid) throws ResourceUnavailableException {
    try {
        advanceStop(vmUuid, false);
    } catch (final OperationTimedoutException e) {
        throw new AgentUnavailableException("Unable to stop vm because the operation to stop timed out", e.getAgentId(), e);
    } catch (final ConcurrentOperationException e) {
        throw new CloudRuntimeException("Unable to stop vm because of a concurrent operation", e);
    }
}

// Force-stop variant of stop(): resources are released even if the hypervisor stop fails.
@Override
public void stopForced(String vmUuid) throws ResourceUnavailableException {
    try {
        advanceStop(vmUuid, true);
    } catch (final OperationTimedoutException e) {
        throw new AgentUnavailableException("Unable to stop vm because the operation to stop timed out", e.getAgentId(), e);
    } catch (final ConcurrentOperationException e) {
        throw new CloudRuntimeException("Unable to stop vm because of a concurrent operation", e);
    }
}

// Decides whether commands for this hypervisor type must be serialized: always parallel for
// KVM/XenServer/Hyperv/LXC, tied to the full-clone setting for VMware, config-driven otherwise.
@Override
public boolean getExecuteInSequence(final HypervisorType hypervisorType) {
    if (HypervisorType.KVM == hypervisorType || HypervisorType.XenServer == hypervisorType || HypervisorType.Hyperv == hypervisorType || HypervisorType.LXC == hypervisorType) {
        return false;
    } else if (HypervisorType.VMware == hypervisorType) {
        final Boolean fullClone = HypervisorGuru.VmwareFullClone.value();
        return fullClone;
    } else {
        return ExecuteInSequence.value();
    }
}

// Collects connection details (storage host, port, IQN) for every managed-storage volume
// attached to the VM, so the hypervisor can disconnect them when the VM stops.
private List<Map<String, String>> getVolumesToDisconnect(VirtualMachine vm) {
    List<Map<String, String>> volumesToDisconnect = new ArrayList<>();
    List<VolumeVO> volumes = _volsDao.findByInstance(vm.getId());
    if (CollectionUtils.isEmpty(volumes)) {
        return volumesToDisconnect;
    }
    for (VolumeVO volume : volumes) {
        StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
        if (storagePool != null && storagePool.isManaged()) {
            Map<String, String> info = new
HashMap<>(3);
            info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress());
            info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort()));
            info.put(DiskTO.IQN, volume.get_iScsiName());
            volumesToDisconnect.add(info);
        }
    }
    return volumesToDisconnect;
}

// Sends a StopCommand to the VM's current host and processes the answer (persisting the
// reported platform for user VMs and updating GPU capacity). Returns false on any failure;
// when force is true, agent-unavailable/timeout failures are tolerated and true is returned.
protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final boolean force, final boolean checkBeforeCleanup) {
    final VirtualMachine vm = profile.getVirtualMachine();
    StopCommand stpCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
    stpCmd.setControlIp(getControlNicIpForVM(vm));
    stpCmd.setVolumesToDisconnect(getVolumesToDisconnect(vm));
    final StopCommand stop = stpCmd;
    try {
        Answer answer = null;
        if(vm.getHostId() != null) {
            answer = _agentMgr.send(vm.getHostId(), stop);
        }
        if (answer != null && answer instanceof StopAnswer) {
            final StopAnswer stopAns = (StopAnswer)answer;
            if (vm.getType() == VirtualMachine.Type.User) {
                final String platform = stopAns.getPlatform();
                if (platform != null) {
                    final UserVmVO userVm = _userVmDao.findById(vm.getId());
                    _userVmDao.loadDetails(userVm);
                    userVm.setDetail("platform", platform);
                    _userVmDao.saveDetails(userVm);
                }
            }
            final GPUDeviceTO gpuDevice = stop.getGpuDevice();
            if (gpuDevice != null) {
                _resourceMgr.updateGPUDetails(vm.getHostId(), gpuDevice.getGroupDetails());
            }
            if (!answer.getResult()) {
                final String details = answer.getDetails();
                s_logger.debug("Unable to stop VM due to " + details);
                return false;
            }
            guru.finalizeStop(profile, answer);
        } else {
            s_logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName());
            return false;
        }
    } catch (final AgentUnavailableException e) {
        if (!force) {
            return false;
        }
    } catch (final OperationTimedoutException e) {
        if (!force) {
            return false;
        }
    }
    return true;
}

// Cleans up a partially started/stopped/migrating VM: stops it on the hypervisor as needed for
// the current state/work-step, then (in finally) releases network and storage resources.
// Returns false when a required stop could not be performed.
protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final ItWorkVO work, final Event event, final boolean cleanUpEvenIfUnableToStop) {
    final VirtualMachine vm = profile.getVirtualMachine();
    final State state = vm.getState();
    s_logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state");
    try {
        if (state == State.Starting) {
            if (work != null) {
                final Step step = work.getStep();
                if (step == Step.Starting && !cleanUpEvenIfUnableToStop) {
                    s_logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step);
                    return false;
                }
                if (step == Step.Started || step == Step.Starting || step == Step.Release) {
                    if (vm.getHostId() != null) {
                        if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                            s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
                            return false;
                        }
                    }
                }
                if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) {
                    s_logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step);
                    return true;
                }
            } else {
                // No work item: best-effort stop on the recorded host.
                if (vm.getHostId() != null) {
                    if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                        s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
                        return false;
                    }
                }
            }
        } else if (state == State.Stopping) {
            if (vm.getHostId() != null) {
                if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                    s_logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process");
                    return false;
                }
            }
        } else if (state == State.Migrating) {
            if (vm.getHostId() != null) {
                if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                    s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
                    return false;
                }
            }
            if (vm.getLastHostId() != null) {
                // NOTE(review): sendStop always targets vm.getHostId() internally, so this second
                // call repeats the same stop rather than stopping on the last host — confirm intent.
                if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                    s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
                    return false;
                }
            }
        } else if (state == State.Running) {
            if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
                s_logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process");
                return false;
            }
        }
    } finally {
        // Always release network and storage resources, even when the stop above failed.
        try {
            _networkMgr.release(profile, cleanUpEvenIfUnableToStop);
            s_logger.debug("Successfully released network resources for the vm " + vm);
        } catch (final Exception e) {
            s_logger.warn("Unable to release some network resources.", e);
        }
        volumeMgr.release(profile);
        s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state));
    }
    return true;
}

// Public stop entry point: runs the stop inline when already inside a VM work job, otherwise
// schedules it through the job queue and unwraps the job's exception result.
@Override
public void advanceStop(final String vmUuid, final boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        // avoid re-entrance
        VmWorkJobVO placeHolder = null;
        final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
        placeHolder = createPlaceHolderWork(vm.getId());
        try {
            orchestrateStop(vmUuid, cleanUpEvenIfUnableToStop);
        } finally {
            if (placeHolder != null) {
                _workJobDao.expunge(placeHolder.getId());
            }
        }
    } else {
        final Outcome<VirtualMachine> outcome = stopVmThroughJobQueue(vmUuid, cleanUpEvenIfUnableToStop);
        try {
            final VirtualMachine vm = outcome.get();
        } catch (final InterruptedException e) {
            throw new RuntimeException("Operation is interrupted", e);
        } catch (final java.util.concurrent.ExecutionException e) {
            throw new RuntimeException("Execution excetion", e);
        }
        // Re-throw whatever exception the job recorded, preserving its original type.
        final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
        if (jobResult != null) {
            if (jobResult instanceof AgentUnavailableException) {
                throw (AgentUnavailableException)jobResult;
            } else if (jobResult instanceof ConcurrentOperationException) {
                throw (ConcurrentOperationException)jobResult;
            } else if (jobResult instanceof OperationTimedoutException) {
                throw
(OperationTimedoutException)jobResult;
            } else if (jobResult instanceof RuntimeException) {
                throw (RuntimeException)jobResult;
            } else if (jobResult instanceof Throwable) {
                throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
            }
        }
    }
}

// Work-job entry point: looks up the VM row and delegates to the stateful advanceStop overload.
private void orchestrateStop(final String vmUuid, final boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    advanceStop(vm, cleanUpEvenIfUnableToStop);
}

// Core stop logic: validates the current state, transitions to Stopping, sends the StopCommand,
// and on success releases network/storage resources and completes the work item. When
// cleanUpEvenIfUnableToStop is true, failures to stop on the hypervisor do not abort cleanup.
private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    final State state = vm.getState();
    if (state == State.Stopped) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("VM is already stopped: " + vm);
        }
        return;
    }
    if (state == State.Destroyed || state == State.Expunging || state == State.Error) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Stopped called on " + vm + " but the state is " + state);
        }
        return;
    }
    // grab outstanding work item if any
    final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState());
    if (work != null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId());
        }
    }
    final Long hostId = vm.getHostId();
    if (hostId == null) {
        // No host recorded: only a forced stop can proceed, by marking the VM stopped in the DB.
        if (!cleanUpEvenIfUnableToStop) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState());
            }
            throw new CloudRuntimeException("Unable to stop " + vm);
        }
        try {
            stateTransitTo(vm, Event.AgentReportStopped, null, null);
        } catch (final NoTransitionException e) {
            s_logger.warn(e.getMessage());
        }
        // mark outstanding work item if any as done
        if (work != null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Updating work item to Done, id:" + work.getId());
            }
            work.setStep(Step.Done);
            _workDao.update(work.getId(), work);
        }
        return;
    } else {
        HostVO host = _hostDao.findById(hostId);
        if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) {
            s_logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: " + vm.getId() + " is not allowed");
            throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode");
        }
    }
    final VirtualMachineGuru vmGuru = getVmGuru(vm);
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    try {
        if (!stateTransitTo(vm, Event.StopRequested, vm.getHostId())) {
            throw new ConcurrentOperationException("VM is being operated on.");
        }
    } catch (final NoTransitionException e1) {
        // Forced stop proceeds with cleanup even when the state transition is rejected.
        if (!cleanUpEvenIfUnableToStop) {
            throw new CloudRuntimeException("We cannot stop " + vm + " when it is in state " + vm.getState());
        }
        final boolean doCleanup = true;
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to transition the state but we're moving on because it's forced stop");
        }
        if (doCleanup) {
            if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) {
                try {
                    if (s_logger.isDebugEnabled() && work != null) {
                        s_logger.debug("Updating work item to Done, id:" + work.getId());
                    }
                    if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) {
                        throw new CloudRuntimeException("Unable to stop " + vm);
                    }
                } catch (final NoTransitionException e) {
                    s_logger.warn("Unable to cleanup " + vm);
                    throw new CloudRuntimeException("Unable to stop " + vm, e);
                }
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Failed to cleanup VM: " + vm);
                }
                throw new CloudRuntimeException("Failed to cleanup " + vm + " , current state " + vm.getState());
            }
        }
    }
    if (vm.getState() != State.Stopping) {
        throw new CloudRuntimeException("We cannot proceed with stop VM " + vm + " since it is not in 'Stopping' state, current state: " + vm.getState());
    }
    vmGuru.prepareStop(profile);
    final StopCommand stop = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false, cleanUpEvenIfUnableToStop);
    stop.setControlIp(getControlNicIpForVM(vm));
    boolean stopped = false;
    Answer answer = null;
    try {
        answer = _agentMgr.send(vm.getHostId(), stop);
        if (answer != null) {
            if (answer instanceof StopAnswer) {
                final StopAnswer stopAns = (StopAnswer)answer;
                if (vm.getType() == VirtualMachine.Type.User) {
                    final String platform = stopAns.getPlatform();
                    if (platform != null) {
                        final UserVmVO userVm = _userVmDao.findById(vm.getId());
                        _userVmDao.loadDetails(userVm);
                        userVm.setDetail("platform", platform);
                        _userVmDao.saveDetails(userVm);
                    }
                }
            }
            stopped = answer.getResult();
            if (!stopped) {
                throw new CloudRuntimeException("Unable to stop the virtual machine due to " + answer.getDetails());
            }
            vmGuru.finalizeStop(profile, answer);
            final GPUDeviceTO gpuDevice = stop.getGpuDevice();
            if (gpuDevice != null) {
                _resourceMgr.updateGPUDetails(vm.getHostId(), gpuDevice.getGroupDetails());
            }
        } else {
            throw new CloudRuntimeException("Invalid answer received in response to a StopCommand on " + vm.instanceName);
        }
    } catch (final AgentUnavailableException e) {
        s_logger.warn("Unable to stop vm, agent unavailable: " + e.toString());
    } catch (final OperationTimedoutException e) {
        s_logger.warn("Unable to stop vm, operation timed out: " + e.toString());
    } finally {
        if (!stopped) {
            if (!cleanUpEvenIfUnableToStop) {
                s_logger.warn("Unable to stop vm " + vm);
                try {
                    stateTransitTo(vm, Event.OperationFailed, vm.getHostId());
                } catch (final NoTransitionException e) {
                    s_logger.warn("Unable to transition the state " + vm);
                }
                throw new CloudRuntimeException("Unable to stop " + vm);
            } else {
                s_logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop");
                vmGuru.finalizeStop(profile, answer);
            }
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held.");
    }
    try {
        _networkMgr.release(profile, cleanUpEvenIfUnableToStop);
        s_logger.debug("Successfully released network resources for the vm " + vm);
    } catch (final Exception e) {
        s_logger.warn("Unable to release some network resources.", e);
    }
    try {
        if (vm.getHypervisorType() != HypervisorType.BareMetal) {
            volumeMgr.release(profile);
            s_logger.debug("Successfully released storage resources for the vm " + vm);
        }
    } catch (final Exception e) {
        s_logger.warn("Unable to release storage resources.", e);
    }
    try {
        if (work != null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Updating the outstanding work item to Done, id:" + work.getId());
            }
            work.setStep(Step.Done);
            _workDao.update(work.getId(), work);
        }
        if (!stateTransitTo(vm, Event.OperationSucceeded, null)) {
            throw new CloudRuntimeException("unable to stop " + vm);
        }
    } catch (final NoTransitionException e) {
        s_logger.warn(e.getMessage());
        throw new CloudRuntimeException("Unable to stop " + vm);
    }
}

// Wires up the shared VM state machine used by the stateTransitTo helpers.
private void setStateMachine() {
    _stateMachine = VirtualMachine.State.getStateMachine();
}

// State transition that also records the reservation id on the VM row before transiting.
protected boolean stateTransitTo(final VMInstanceVO vm, final VirtualMachine.Event e, final Long hostId, final String reservationId) throws NoTransitionException {
    // if there are active vm snapshots task, state change is not allowed
    // Disable this hacking thing, VM snapshot task need to be managed by its orchestartion flow istelf instead of
    // hacking it here at general VM manager
    /*
    if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
        s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
        return false;
    }
    */
    vm.setReservationId(reservationId);
    return _stateMachine.transitTo(vm, e, new Pair<Long, Long>(vm.getHostId(), hostId), _vmDao);
}

// Interface variant of stateTransitTo: also maintains lastHostId bookkeeping on successful
// start (records the new host) and successful stop (records the host it was stopped on).
@Override
public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Event e, final Long hostId) throws NoTransitionException {
    final VMInstanceVO vm =
(VMInstanceVO)vm1;
    /*
     * Remove the hacking logic here.
    // if there are active vm snapshots task, state change is not allowed
    if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
        s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
        return false;
    }
    */
    final State oldState = vm.getState();
    if (oldState == State.Starting) {
        // A successful start pins lastHostId to the host the VM came up on.
        if (e == Event.OperationSucceeded) {
            vm.setLastHostId(hostId);
        }
    } else if (oldState == State.Stopping) {
        // A successful stop remembers the host the VM was last running on.
        if (e == Event.OperationSucceeded) {
            vm.setLastHostId(vm.getHostId());
        }
    }
    return _stateMachine.transitTo(vm, e, new Pair<Long, Long>(vm.getHostId(), hostId), _vmDao);
}

// Destroys (and optionally expunges) the VM: stops it first, removes its VM snapshots, then
// drives the DestroyRequested/ExpungeOperation transitions inside a single DB transaction.
@Override
public void destroy(final String vmUuid, final boolean expunge) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
    VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find vm or vm is destroyed: " + vm);
        }
        return;
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off"));
    }
    // Force-stop behavior is governed by the VmDestroyForcestop configuration value.
    advanceStop(vmUuid, VmDestroyForcestop.value());
    deleteVMSnapshots(vm, expunge);
    Transaction.execute(new TransactionCallbackWithExceptionNoReturn<CloudRuntimeException>() {
        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) throws CloudRuntimeException {
            // Re-read the VM inside the transaction to act on its latest state.
            VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
            try {
                if (!stateTransitTo(vm, VirtualMachine.Event.DestroyRequested, vm.getHostId())) {
                    s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
                    throw new CloudRuntimeException("Unable to destroy " + vm);
                } else {
                    if (expunge) {
                        if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
                            s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm);
                            throw new CloudRuntimeException("Unable to expunge " + vm);
                        }
                    }
                }
            } catch (final NoTransitionException e) {
                s_logger.debug(e.getMessage());
                throw new CloudRuntimeException("Unable to destroy " + vm, e);
            }
        }
    });
}

/**
 * Delete vm snapshots depending on vm's hypervisor type. For Vmware, vm snapshots removal is delegated to vm cleanup thread
 * to reduce tasks sent to hypervisor (one tasks to delete vm snapshots and vm itself
 * instead of one task for each vm snapshot plus another for the vm)
 * @param vm vm
 * @param expunge indicates if vm should be expunged
 */
private void deleteVMSnapshots(VMInstanceVO vm, boolean expunge) {
    if (!
vm.getHypervisorType().equals(HypervisorType.VMware)) { if (!_vmSnapshotMgr.deleteAllVMSnapshots(vm.getId(), null)) { s_logger.debug("Unable to delete all snapshots for " + vm); throw new CloudRuntimeException("Unable to delete vm snapshots for " + vm); } } else { if (expunge) { _vmSnapshotMgr.deleteVMSnapshotsFromDB(vm.getId()); } } } protected boolean checkVmOnHost(final VirtualMachine vm, final long hostId) throws AgentUnavailableException, OperationTimedoutException { final Answer answer = _agentMgr.send(hostId, new CheckVirtualMachineCommand(vm.getInstanceName())); if (answer == null || !answer.getResult()) { return false; } if (answer instanceof CheckVirtualMachineAnswer) { final CheckVirtualMachineAnswer vmAnswer = (CheckVirtualMachineAnswer)answer; if (vmAnswer.getState() == PowerState.PowerOff) { return false; } } UserVmVO userVm = _userVmDao.findById(vm.getId()); if (userVm != null) { List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vm.getId()); RestoreVMSnapshotCommand command = _vmSnapshotMgr.createRestoreCommand(userVm, vmSnapshots); if (command != null) { RestoreVMSnapshotAnswer restoreVMSnapshotAnswer = (RestoreVMSnapshotAnswer) _agentMgr.send(hostId, command); if (restoreVMSnapshotAnswer == null || !restoreVMSnapshotAnswer.getResult()) { s_logger.warn("Unable to restore the vm snapshot from image file after live migration of vm with vmsnapshots: " + restoreVMSnapshotAnswer.getDetails()); } } } return true; } @Override public void storageMigration(final String vmUuid, final StoragePool destPool) { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { orchestrateStorageMigration(vmUuid, destPool); } finally { if (placeHolder != null) { 
_workJobDao.expunge(placeHolder.getId()); } } } else { final Outcome<VirtualMachine> outcome = migrateVmStorageThroughJobQueue(vmUuid, destPool); try { final VirtualMachine vm = outcome.get(); } catch (final InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); } catch (final java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobResult != null) { if (jobResult instanceof RuntimeException) { throw (RuntimeException)jobResult; } else if (jobResult instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobResult); } } } } private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (destPool == null) { throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool"); } try { stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { s_logger.debug("Unable to migrate vm: " + e.toString()); throw new CloudRuntimeException("Unable to migrate vm: " + e.toString()); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); boolean migrationResult = false; try { migrationResult = volumeMgr.storageMigration(profile, destPool); if (migrationResult) { //if the vm is migrated to different pod in basic mode, need to reallocate ip if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) { final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null); final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null); _networkMgr.reallocate(vmProfile, plan); } //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool 
vm.setLastHostId(null); vm.setPodIdToDeployIn(destPool.getPodId()); // If VM was cold migrated between clusters belonging to two different VMware DCs, // unregister the VM from the source host and cleanup the associated VM files. if (vm.getHypervisorType().equals(HypervisorType.VMware)) { Long srcClusterId = null; Long srcHostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId(); if (srcHostId != null) { HostVO srcHost = _hostDao.findById(srcHostId); srcClusterId = srcHost.getClusterId(); } final Long destClusterId = destPool.getClusterId(); if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) { final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + " from source host: " + srcHostId); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); try { _agentMgr.send(srcHostId, uvc); } catch (final AgentUnavailableException | OperationTimedoutException e) { throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHostId + " after successfully migrating VM's storage across VMware Datacenters"); } } } } } else { s_logger.debug("Storage migration failed"); } } catch (final ConcurrentOperationException e) { s_logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientVirtualNetworkCapacityException e) { s_logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientAddressCapacityException e) { s_logger.debug("Failed to migration: " 
+ e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final InsufficientCapacityException e) { s_logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } catch (final StorageUnavailableException e) { s_logger.debug("Failed to migration: " + e.toString()); throw new CloudRuntimeException("Failed to migration: " + e.toString()); } finally { try { stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null); } catch (final NoTransitionException e) { s_logger.debug("Failed to change vm state: " + e.toString()); throw new CloudRuntimeException("Failed to change vm state: " + e.toString()); } } } @Override public void migrate(final String vmUuid, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { orchestrateMigrate(vmUuid, srcHostId, dest); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { final Outcome<VirtualMachine> outcome = migrateVmThroughJobQueue(vmUuid, srcHostId, dest); try { final VirtualMachine vm = outcome.get(); } catch (final InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); } catch (final java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobResult != null) { if (jobResult instanceof ResourceUnavailableException) { throw (ResourceUnavailableException)jobResult; } else if (jobResult instanceof ConcurrentOperationException) { throw 
(ConcurrentOperationException)jobResult; } else if (jobResult instanceof RuntimeException) { throw (RuntimeException)jobResult; } else if (jobResult instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobResult); } } } } private void orchestrateMigrate(final String vmUuid, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Unable to find the vm " + vmUuid); } throw new CloudRuntimeException("Unable to find a virtual machine with id " + vmUuid); } migrate(vm, srcHostId, dest); } protected void migrate(final VMInstanceVO vm, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { s_logger.info("Migrating " + vm + " to " + dest); final long dstHostId = dest.getHost().getId(); final Host fromHost = _hostDao.findById(srcHostId); if (fromHost == null) { s_logger.info("Unable to find the host to migrate from: " + srcHostId); throw new CloudRuntimeException("Unable to find the host to migrate from: " + srcHostId); } if (fromHost.getClusterId().longValue() != dest.getCluster().getId()) { final List<VolumeVO> volumes = _volsDao.findCreatedByInstance(vm.getId()); for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { s_logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + dest.getHost().getId()); throw new CloudRuntimeException( "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + dest.getHost().getId()); } } } final VirtualMachineGuru vmGuru = getVmGuru(vm); if (vm.getState() != State.Running) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM is 
not Running, unable to migrate the vm " + vm); } throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM_MIGRATE; if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE; } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY_MIGRATE; } final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm); for (final NicProfile nic : _networkMgr.getNicProfiles(vm)) { vmSrc.addNic(nic); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); _networkMgr.prepareNicForMigration(profile, dest); volumeMgr.prepareForMigration(profile, dest); profile.setConfigDriveLabel(VmConfigDriveLabel.value()); final VirtualMachineTO to = toVmTO(profile); final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId()); work.setStep(Step.Prepare); work.setResourceType(ItWorkVO.ResourceType.Host); work.setResourceId(dstHostId); work = _workDao.persist(work); Answer pfma = null; try { pfma = _agentMgr.send(dstHostId, pfmc); if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; final String msg = "Unable to prepare for migration due to " + details; pfma = null; throw new AgentUnavailableException(msg, dstHostId); } } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); } finally { if (pfma == null) { _networkMgr.rollbackNicForMigration(vmSrc, profile); work.setStep(Step.Done); _workDao.update(work.getId(), work); } } vm.setLastHostId(srcHostId); try { if (vm == null || vm.getHostId() == null || vm.getHostId() != srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { _networkMgr.rollbackNicForMigration(vmSrc, profile); s_logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); s_logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } boolean migrated = false; try { final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString()); boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence); mc.setAutoConvergence(kvmAutoConvergence); mc.setHostGuid(dest.getHost().getGuid()); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); if (ma == null || !ma.getResult()) { final String details = ma != null ? 
ma.getDetails() : "null answer returned"; throw new CloudRuntimeException(details); } } catch (final OperationTimedoutException e) { if (e.isActive()) { s_logger.warn("Active migration command so scheduling a restart for " + vm); _haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); } try { if (!changeState(vm, VirtualMachine.Event.OperationSucceeded, dstHostId, work, Step.Started)) { throw new ConcurrentOperationException("Unable to change the state for " + vm); } } catch (final NoTransitionException e1) { throw new ConcurrentOperationException("Unable to change state due to " + e1.getMessage()); } try { if (!checkVmOnHost(vm, dstHostId)) { s_logger.error("Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm)), null); } catch (final AgentUnavailableException e) { s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { s_logger.debug("Error while checking the vm " + vm + " on host " + dstHostId, e); } migrated = true; } finally { if (!migrated) { s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + dest.getPod().getName(), "Migrate Command failed. 
Please check logs."); try { _agentMgr.send(dstHostId, new Commands(cleanup(vm)), null); } catch (final AgentUnavailableException ae) { s_logger.info("Looks like the destination Host is unavailable for cleanup"); } try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e) { s_logger.warn(e.getMessage()); } } else { _networkMgr.commitNicForMigration(vmSrc, profile); } work.setStep(Step.Done); _workDao.update(work.getId(), work); } } /** * We create the mapping of volumes and storage pool to migrate the VMs according to the information sent by the user. * If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, Host, Map, List)} */ protected Map<Volume, StoragePool> createMappingVolumeAndStoragePool(VirtualMachineProfile profile, Host targetHost, Map<Long, Long> userDefinedMapOfVolumesAndStoragePools) { Map<Volume, StoragePool> volumeToPoolObjectMap = buildMapUsingUserInformation(profile, targetHost, userDefinedMapOfVolumesAndStoragePools); List<Volume> volumesNotMapped = findVolumesThatWereNotMappedByTheUser(profile, volumeToPoolObjectMap); createStoragePoolMappingsForVolumes(profile, targetHost, volumeToPoolObjectMap, volumesNotMapped); return volumeToPoolObjectMap; } /** * Given the map of volume to target storage pool entered by the user, we check for other volumes that the VM might have and were not configured. * This map can be then used by CloudStack to find new target storage pools according to the target host. 
*/ protected List<Volume> findVolumesThatWereNotMappedByTheUser(VirtualMachineProfile profile, Map<Volume, StoragePool> volumeToStoragePoolObjectMap) { List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId()); List<Volume> volumesNotMapped = new ArrayList<>(); for (Volume volume : allVolumes) { if (!volumeToStoragePoolObjectMap.containsKey(volume)) { volumesNotMapped.add(volume); } } return volumesNotMapped; } /** * Builds the map of storage pools and volumes with the information entered by the user. Before creating the an entry we validate if the migration is feasible checking if the migration is allowed and if the target host can access the defined target storage pool. */ protected Map<Volume, StoragePool> buildMapUsingUserInformation(VirtualMachineProfile profile, Host targetHost, Map<Long, Long> userDefinedVolumeToStoragePoolMap) { Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>(); if (MapUtils.isEmpty(userDefinedVolumeToStoragePoolMap)) { return volumeToPoolObjectMap; } for(Long volumeId: userDefinedVolumeToStoragePoolMap.keySet()) { VolumeVO volume = _volsDao.findById(volumeId); Long poolId = userDefinedVolumeToStoragePoolMap.get(volumeId); StoragePoolVO targetPool = _storagePoolDao.findById(poolId); StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId()); executeManagedStorageChecksWhenTargetStoragePoolProvided(currentPool, volume, targetPool); if (_poolHostDao.findByPoolHost(targetPool.getId(), targetHost.getId()) == null) { throw new CloudRuntimeException( String.format("Cannot migrate the volume [%s] to the storage pool [%s] while migrating VM [%s] to target host [%s]. 
The host does not have access to the storage pool entered.", volume.getUuid(), targetPool.getUuid(), profile.getUuid(), targetHost.getUuid())); } if (currentPool.getId() == targetPool.getId()) { s_logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid())); } volumeToPoolObjectMap.put(volume, targetPool); } return volumeToPoolObjectMap; } /** * Executes the managed storage checks for the mapping<volume, storage pool> entered by the user. The checks execute by this method are the following. * <ul> * <li> If the current storage pool of the volume is not a managed storage, we do not need to validate anything here. * <li> If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception. * </ul> */ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) { if (!currentPool.isManaged()) { return; } if (currentPool.getId() == targetPool.getId()) { return; } throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].", volume.getUuid(), currentPool.getUuid(), targetPool.getUuid())); } /** * For each one of the volumes we will map it to a storage pool that is available via the target host. * An exception is thrown if we cannot find a storage pool that is accessible in the target host to migrate the volume to. 
*/ protected void createStoragePoolMappingsForVolumes(VirtualMachineProfile profile, Host targetHost, Map<Volume, StoragePool> volumeToPoolObjectMap, List<Volume> allVolumes) { for (Volume volume : allVolumes) { StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId()); executeManagedStorageChecksWhenTargetStoragePoolNotProvided(targetHost, currentPool, volume); if (ScopeType.HOST.equals(currentPool.getScope()) || isStorageCrossClusterMigration(targetHost, currentPool)) { createVolumeToStoragePoolMappingIfPossible(profile, targetHost, volumeToPoolObjectMap, volume, currentPool); } else { volumeToPoolObjectMap.put(volume, currentPool); } } } /** * Executes the managed storage checks for the volumes that the user has not entered a mapping of <volume, storage pool>. The following checks are performed. * <ul> * <li> If the current storage pool is not a managed storage, we do not need to proceed with this method; * <li> We check if the target host has access to the current managed storage pool. If it does not have an exception will be thrown. * </ul> */ protected void executeManagedStorageChecksWhenTargetStoragePoolNotProvided(Host targetHost, StoragePoolVO currentPool, Volume volume) { if (!currentPool.isManaged()) { return; } if (_poolHostDao.findByPoolHost(currentPool.getId(), targetHost.getId()) == null) { throw new CloudRuntimeException(String.format("The target host does not have access to the volume's managed storage pool. [volumeId=%s, storageId=%s, targetHostId=%s].", volume.getUuid(), currentPool.getUuid(), targetHost.getUuid())); } } /** * Return true if the VM migration is a cross cluster migration. To execute that, we check if the volume current storage pool cluster is different from the target host cluster. 
*/ protected boolean isStorageCrossClusterMigration(Host targetHost, StoragePoolVO currentPool) { return ScopeType.CLUSTER.equals(currentPool.getScope()) && currentPool.getClusterId() != targetHost.getClusterId(); } /** * We will add a mapping of volume to storage pool if needed. The conditions to add a mapping are the following: * <ul> * <li> The candidate storage pool where the volume is to be allocated can be accessed by the target host * <li> If no storage pool is found to allocate the volume we throw an exception. * </ul> * * Side note: this method should only be called if the volume is on local storage or if we are executing a cross cluster migration. */ protected void createVolumeToStoragePoolMappingIfPossible(VirtualMachineProfile profile, Host targetHost, Map<Volume, StoragePool> volumeToPoolObjectMap, Volume volume, StoragePoolVO currentPool) { List<StoragePool> storagePoolList = getCandidateStoragePoolsToMigrateLocalVolume(profile, targetHost, volume); if (CollectionUtils.isEmpty(storagePoolList)) { throw new CloudRuntimeException(String.format("There is not storage pools available at the target host [%s] to migrate volume [%s]", targetHost.getUuid(), volume.getUuid())); } Collections.shuffle(storagePoolList); boolean canTargetHostAccessVolumeCurrentStoragePool = false; for (StoragePool storagePool : storagePoolList) { if (storagePool.getId() == currentPool.getId()) { canTargetHostAccessVolumeCurrentStoragePool = true; break; } } if (!canTargetHostAccessVolumeCurrentStoragePool) { volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(storagePoolList.get(0).getUuid())); } } /** * We use {@link StoragePoolAllocator} objects to find storage pools connected to the targetHost where we would be able to allocate the given volume. 
*/
    protected List<StoragePool> getCandidateStoragePoolsToMigrateLocalVolume(VirtualMachineProfile profile, Host targetHost, Volume volume) {
        List<StoragePool> poolList = new ArrayList<>();

        DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
        // Constrain the deployment plan to the target host's zone/pod/cluster/host.
        DataCenterDeployment plan = new DataCenterDeployment(targetHost.getDataCenterId(), targetHost.getPodId(), targetHost.getClusterId(), targetHost.getId(), null, null);
        ExcludeList avoid = new ExcludeList();

        StoragePoolVO volumeStoragePool = _storagePoolDao.findById(volume.getPoolId());
        if (volumeStoragePool.isLocal()) {
            diskProfile.setUseLocalStorage(true);
        }
        // Ask every configured allocator for candidate pools; keep local pools, or any pool
        // when this is a cross-cluster migration.
        for (StoragePoolAllocator allocator : _storagePoolAllocators) {
            List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
            if (CollectionUtils.isEmpty(poolListFromAllocator)) {
                continue;
            }
            for (StoragePool pool : poolListFromAllocator) {
                if (pool.isLocal() || isStorageCrossClusterMigration(targetHost, volumeStoragePool)) {
                    poolList.add(pool);
                }
            }
        }
        return poolList;
    }

    /**
     * Transitions the VM to the Migrating state via changeState; throws
     * ConcurrentOperationException if the VM's state changed concurrently or the
     * transition is rejected.
     */
    private <T extends VMInstanceVO> void moveVmToMigratingState(final T vm, final Long hostId, final ItWorkVO work) throws ConcurrentOperationException {
        // Put the vm in migrating state.
        try {
            if (!changeState(vm, Event.MigrationRequested, hostId, work, Step.Migrating)) {
                s_logger.info("Migration cancelled because state has changed: " + vm);
                throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm);
            }
        } catch (final NoTransitionException e) {
            s_logger.info("Migration cancelled because " + e.getMessage());
            throw new ConcurrentOperationException("Migration cancelled because " + e.getMessage());
        }
    }

    /**
     * Transitions the VM back to the Running state after a successful migration; throws
     * ConcurrentOperationException if the transition fails.
     */
    private <T extends VMInstanceVO> void moveVmOutofMigratingStateOnSuccess(final T vm, final Long hostId, final ItWorkVO work) throws ConcurrentOperationException {
        // Put the vm in running state.
        try {
            if (!changeState(vm, Event.OperationSucceeded, hostId, work, Step.Started)) {
                s_logger.error("Unable to change the state for " + vm);
                throw new ConcurrentOperationException("Unable to change the state for " + vm);
            }
        } catch (final NoTransitionException e) {
            s_logger.error("Unable to change state due to " + e.getMessage());
            throw new ConcurrentOperationException("Unable to change state due to " + e.getMessage());
        }
    }

    /**
     * Migrates the VM together with its volumes to the given destination host. Runs the
     * orchestration directly when dispatched by the VM work-job dispatcher; otherwise queues
     * the work and unwraps any exception recorded in the job result.
     */
    @Override
    public void migrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) throws ResourceUnavailableException,
            ConcurrentOperationException {
        final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance
            VmWorkJobVO placeHolder = null;
            final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
            placeHolder = createPlaceHolderWork(vm.getId());
            try {
                orchestrateMigrateWithStorage(vmUuid, srcHostId, destHostId, volumeToPool);
            } finally {
                if (placeHolder != null) {
                    _workJobDao.expunge(placeHolder.getId());
                }
            }
        } else {
            final Outcome<VirtualMachine> outcome = migrateVmWithStorageThroughJobQueue(vmUuid, srcHostId, destHostId, volumeToPool);

            try {
                final VirtualMachine vm = outcome.get();
            } catch (final InterruptedException e)
{ throw new RuntimeException("Operation is interrupted", e); } catch (final java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobException = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobException != null) { if (jobException instanceof ResourceUnavailableException) { throw (ResourceUnavailableException)jobException; } else if (jobException instanceof ConcurrentOperationException) { throw (ConcurrentOperationException)jobException; } else if (jobException instanceof RuntimeException) { throw (RuntimeException)jobException; } else if (jobException instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobException); } } } } private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final HostVO srcHost = _hostDao.findById(srcHostId); final HostVO destHost = _hostDao.findById(destHostId); final VirtualMachineGuru vmGuru = getVmGuru(vm); final DataCenterVO dc = _dcDao.findById(destHost.getDataCenterId()); final HostPodVO pod = _podDao.findById(destHost.getPodId()); final Cluster cluster = _clusterDao.findById(destHost.getClusterId()); final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost); // Create a map of which volume should go in which storage pool. final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final Map<Volume, StoragePool> volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destHost, volumeToPool); // If none of the volumes have to be migrated, fail the call. Administrator needs to make a call for migrating // a vm and not migrating a vm with storage. 
if (volumeToPoolMap == null || volumeToPoolMap.isEmpty()) { throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost + " to destination host " + destHost + " doesn't involve migrating the volumes."); } AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM_MIGRATE; if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE; } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY_MIGRATE; } _networkMgr.prepareNicForMigration(profile, destination); volumeMgr.prepareForMigration(profile, destination); final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); final VirtualMachineTO to = hvGuru.implement(profile); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId()); work.setStep(Step.Prepare); work.setResourceType(ItWorkVO.ResourceType.Host); work.setResourceId(destHostId); work = _workDao.persist(work); // Put the vm in migrating state. 
// ---- Continuation of the migration orchestration method begun before this chunk ----
// (vm, srcHost/srcHostId, destHost/destHostId, work, profile, to, volumeToPoolMap, vmGuru,
//  alertType and dc are parameters/locals declared earlier in the method.)
vm.setLastHostId(srcHostId);
vm.setPodIdToDeployIn(destHost.getPodId());
// Flip the VM into the Migrating state and record it against the pending work item.
moveVmToMigratingState(vm, destHostId, work);
boolean migrated = false;
try {
    // Config drive handling: detach the config-drive ISO at the source host before moving.
    // After a successful migration the drive is re-attached on the destination host; on
    // failure the VM is stopped, which cleans the ISO up.
    Nic defaultNic = _networkModel.getDefaultNic(vm.getId());
    List<String[]> vmData = null;
    if (defaultNic != null) {
        UserVmVO userVm = _userVmDao.findById(vm.getId());
        Map<String, String> details = userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        userVm.setDetails(details);
        Network network = _networkModel.getNetwork(defaultNic.getNetworkId());
        // Config drive is only used for shared networks without network services.
        if (_networkModel.isSharedNetworkWithoutServices(network.getId())) {
            final String serviceOffering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()).getDisplayText();
            boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
            // Regenerate the per-VM metadata (userdata, ssh key, password, ...) to ship on the ISO.
            vmData = _networkModel.generateVmData(userVm.getUserData(), serviceOffering, vm.getDataCenterId(), vm.getInstanceName(), vm.getHostName(), vm.getId(),
                    vm.getUuid(), defaultNic.getMacAddress(), userVm.getDetail("SSH.PublicKey"), (String) profile.getParameter(VirtualMachineProfile.Param.VmPassword), isWindows);
            String vmName = vm.getInstanceName();
            String configDriveIsoRootFolder = "/tmp";
            String isoFile = configDriveIsoRootFolder + "/" + vmName + "/configDrive/" + vmName + ".iso";
            profile.setVmData(vmData);
            profile.setConfigDriveLabel(VmConfigDriveLabel.value());
            profile.setConfigDriveIsoRootFolder(configDriveIsoRootFolder);
            profile.setConfigDriveIsoFile(isoFile);
            // At the source host, detach the config drive iso (attach flag = false).
            AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false);
            try {
                _agentMgr.send(srcHost.getId(), dettachCommand);
                s_logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost);
            } catch (OperationTimedoutException e) {
                // Best-effort detach: a timeout is logged and migration proceeds.
                // NOTE(review): log message contains typos ("TIme out occured while exeuting") — kept verbatim.
                s_logger.debug("TIme out occured while exeuting command AttachOrDettachConfigDrive " + e.getMessage());
            }
        }
    }
    // Migrate the vm and its volume(s) to the destination storage pools.
    volumeMgr.migrateVolumes(vm, to, srcHost, destHost, volumeToPoolMap);
    // Put the vm back into the Running state on the destination host.
    moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
    try {
        // Verify the VM is actually reported by the destination host; if not, stop/clean up.
        if (!checkVmOnHost(vm, destHostId)) {
            s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
            try {
                _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
            } catch (final AgentUnavailableException e) {
                s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId);
            }
            cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
            // NOTE(review): "desintation" typo in this exception message — kept verbatim.
            throw new CloudRuntimeException("VM not found on desintation host. Unable to complete migration for " + vm);
        }
    } catch (final OperationTimedoutException e) {
        s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e);
    }
    migrated = true;
} finally {
    if (!migrated) {
        // Roll back: alert, clean up on the destination, restore pod id and state on the source.
        s_logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
        _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(),
                "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + dc.getName(),
                "Migrate Command failed.  Please check logs.");
        try {
            _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
            vm.setPodIdToDeployIn(srcHost.getPodId());
            stateTransitTo(vm, Event.OperationFailed, srcHostId);
        } catch (final AgentUnavailableException e) {
            s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);
        } catch (final NoTransitionException e) {
            s_logger.error("Error while transitioning vm from migrating to running state.", e);
        }
    }
    // Always close out the work item, whether migration succeeded or not.
    work.setStep(Step.Done);
    _workDao.update(work.getId(), work);
}
}

/**
 * Converts a VM profile into the hypervisor-specific {@link VirtualMachineTO}
 * by delegating to the guru registered for the VM's hypervisor type.
 */
@Override
public VirtualMachineTO toVmTO(final VirtualMachineProfile profile) {
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(profile.getVirtualMachine().getHypervisorType());
    final VirtualMachineTO to = hvGuru.implement(profile);
    return to;
}

/**
 * Hands unfinished work items owned by a (presumably dead) management-server node
 * over to the HA manager: Starting work is rescheduled as a restart, Stopping as a
 * checked stop, Migrating as a migration. Serialized across nodes via a global lock.
 *
 * @param nodeId management server id whose in-progress work items are reassigned
 */
protected void cancelWorkItems(final long nodeId) {
    final GlobalLock scanLock = GlobalLock.getInternLock("vmmgr.cancel.workitem");
    try {
        // Only one management server at a time may scan/cancel work items (3s lock wait).
        if (scanLock.lock(3)) {
            try {
                final List<ItWorkVO> works = _workDao.listWorkInProgressFor(nodeId);
                for (final ItWorkVO work : works) {
                    s_logger.info("Handling unfinished work item: " + work);
                    try {
                        final VMInstanceVO vm = _vmDao.findById(work.getInstanceId());
                        if (vm != null) {
                            if (work.getType() == State.Starting) {
                                _haMgr.scheduleRestart(vm, true);
                                work.setManagementServerId(_nodeId);
                                work.setStep(Step.Done);
                                _workDao.update(work.getId(), work);
                            } else if (work.getType() == State.Stopping) {
                                _haMgr.scheduleStop(vm, vm.getHostId(), WorkType.CheckStop);
                                work.setManagementServerId(_nodeId);
                                work.setStep(Step.Done);
                                _workDao.update(work.getId(), work);
                            } else if (work.getType() == State.Migrating) {
                                // NOTE(review): unlike the Starting/Stopping branches, ownership
                                // (managementServerId) is not reassigned here — confirm intentional.
                                _haMgr.scheduleMigration(vm);
                                work.setStep(Step.Done);
                                _workDao.update(work.getId(), work);
                            }
                        }
                    } catch (final Exception e) {
                        // Keep processing the remaining work items even if one fails.
                        s_logger.error("Error while handling " + work, e);
                    }
                }
            } finally {
                scanLock.unlock();
            }
        }
    } finally {
        scanLock.releaseRef();
    }
}

/**
 * Migrates a VM off the given source host. Runs the orchestration inline when
 * already inside a VM-work job (guarded by a placeholder work record), otherwise
 * dispatches through the VM work-job queue and unwraps the job result.
 * (Method body continues on the following lines.)
 */
@Override
public void migrateAway(final String vmUuid, final long srcHostId) throws InsufficientServerCapacityException {
// ---- Body of migrateAway(vmUuid, srcHostId), whose signature is on the preceding line ----
final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
    // Already inside a VM work job: avoid re-entrance and orchestrate directly,
    // bracketing the operation with a placeholder work record.
    VmWorkJobVO placeHolder = null;
    final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
    placeHolder = createPlaceHolderWork(vm.getId());
    try {
        try {
            // First attempt with the default planner ...
            orchestrateMigrateAway(vmUuid, srcHostId, null);
        } catch (final InsufficientServerCapacityException e) {
            // ... and retry once with the HA planner when capacity cannot be found.
            s_logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner");
            orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner());
        }
    } finally {
        _workJobDao.expunge(placeHolder.getId());
    }
} else {
    // Not in a work job: enqueue the migration and wait for the outcome.
    final Outcome<VirtualMachine> outcome = migrateVmAwayThroughJobQueue(vmUuid, srcHostId);
    try {
        final VirtualMachine vm = outcome.get();
    } catch (final InterruptedException e) {
        throw new RuntimeException("Operation is interrupted", e);
    } catch (final java.util.concurrent.ExecutionException e) {
        // NOTE(review): "Execution excetion" typo in this message — kept verbatim.
        throw new RuntimeException("Execution excetion", e);
    }
    // Re-throw any exception the job serialized as its result, narrowest type first.
    final Object jobException = _jobMgr.unmarshallResultObject(outcome.getJob());
    if (jobException != null) {
        if (jobException instanceof InsufficientServerCapacityException) {
            throw (InsufficientServerCapacityException)jobException;
        } else if (jobException instanceof ConcurrentOperationException) {
            throw (ConcurrentOperationException)jobException;
        } else if (jobException instanceof RuntimeException) {
            throw (RuntimeException)jobException;
        } else if (jobException instanceof Throwable) {
            throw new RuntimeException("Unexpected exception", (Throwable)jobException);
        }
    }
}
}

/**
 * Plans a destination and migrates the VM off srcHostId, excluding hosts that have
 * already been tried. Loops over candidate destinations; if every migration attempt
 * fails, the VM is force-stopped and a CloudRuntimeException is raised.
 *
 * @param planner deployment planner to use; null selects the default planner
 * @throws InsufficientServerCapacityException when no candidate destination exists
 */
private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, final DeploymentPlanner planner) throws InsufficientServerCapacityException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    if (vm == null) {
        s_logger.debug("Unable to find a VM for " + vmUuid);
        throw new CloudRuntimeException("Unable to find " + vmUuid);
    }
    ServiceOfferingVO offeringVO = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, offeringVO, null, null);
    final Long hostId = vm.getHostId();
    if (hostId == null) {
        s_logger.debug("Unable to migrate because the VM doesn't have a host id: " + vm);
        throw new CloudRuntimeException("Unable to migrate " + vmUuid);
    }
    final Host host = _hostDao.findById(hostId);
    Long poolId = null;
    // Pin the deployment plan to the pool of the VM's (last) ready root volume so
    // the planner prefers hosts that can reach the same storage.
    final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
    for (final VolumeVO rootVolumeOfVm : vols) {
        final StoragePoolVO rootDiskPool = _storagePoolDao.findById(rootVolumeOfVm.getPoolId());
        if (rootDiskPool != null) {
            poolId = rootDiskPool.getId();
        }
    }
    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
    final ExcludeList excludes = new ExcludeList();
    excludes.addHost(hostId);
    DeployDestination dest = null;
    // Keep planning/trying destinations until a migration succeeds or planning fails.
    while (true) {
        try {
            dest = _dpMgr.planDeployment(profile, plan, excludes, planner);
        } catch (final AffinityConflictException e2) {
            s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
            throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
        }
        if (dest != null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Found destination " + dest + " for migrating to.");
            }
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Unable to find destination for migrating the vm " + profile);
            }
            throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
        }
        // Exclude this destination so a failed attempt is not retried against the same host.
        excludes.addHost(dest.getHost().getId());
        try {
            migrate(vm, srcHostId, dest);
            return;
        } catch (final ResourceUnavailableException e) {
            s_logger.debug("Unable to migrate to unavailable " + dest);
        } catch (final ConcurrentOperationException e) {
            s_logger.debug("Unable to migrate VM due to: " + e.getMessage());
        }
        // Migration attempt failed: force-stop the VM and give up.
        // NOTE(review): this stops after the FIRST failed migration attempt even though the
        // surrounding while(true) suggests retrying other destinations — confirm intentional.
        try {
            advanceStop(vmUuid, true);
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final ResourceUnavailableException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final ConcurrentOperationException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        } catch (final OperationTimedoutException e) {
            s_logger.debug("Unable to stop VM due to " + e.getMessage());
            throw new CloudRuntimeException("Unable to migrate " + vm);
        }
    }
}

/**
 * Periodic housekeeping task: expires stale ItWork entries and purges completed
 * VM work jobs older than the configured cleanup interval.
 */
protected class CleanupTask extends ManagedContextRunnable {
    @Override
    protected void runInContext() {
        s_logger.trace("VM Operation Thread Running");
        try {
            _workDao.cleanup(VmOpCleanupWait.value());
            final Date cutDate = new Date(DateUtil.currentGMTTime().getTime() - VmOpCleanupInterval.value() * 1000);
            _workJobDao.expungeCompletedWorkJobs(cutDate);
        } catch (final Exception e) {
            s_logger.error("VM Operations failed due to ", e);
        }
    }
}

/**
 * Returns true only if every host allocator agrees the VM can be upgraded to the
 * given service offering (stops at the first allocator that says no).
 */
@Override
public boolean isVirtualMachineUpgradable(final VirtualMachine vm, final ServiceOffering offering) {
    boolean isMachineUpgradable = true;
    for (final HostAllocator allocator : hostAllocators) {
        isMachineUpgradable = allocator.isVirtualMachineUpgradable(vm, offering);
        if (isMachineUpgradable) {
            continue;
        } else {
            break;
        }
    }
    return isMachineUpgradable;
}

/**
 * Public reboot entry point; wraps {@link #advanceReboot} and converts a
 * concurrent-operation conflict into a CloudRuntimeException.
 */
@Override
public void reboot(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException {
    try {
        advanceReboot(vmUuid, params);
    } catch (final ConcurrentOperationException e) {
        throw new CloudRuntimeException("Unable to reboot a VM due to concurrent operation", e);
    }
}

/**
 * Reboots a VM, either inline (when already inside a VM work job) or via the
 * work-job queue. (Method body continues on the following lines.)
 */
@Override
public void advanceReboot(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    final AsyncJobExecutionContext jobContext =
// ---- Continuation of advanceReboot(vmUuid, params): jobContext assignment completes here ----
AsyncJobExecutionContext.getCurrentExecutionContext();
if ( jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
    // Already inside a VM work job: avoid re-entrance and orchestrate directly.
    VmWorkJobVO placeHolder = null;
    final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
    placeHolder = createPlaceHolderWork(vm.getId());
    try {
        orchestrateReboot(vmUuid, params);
    } finally {
        if (placeHolder != null) {
            _workJobDao.expunge(placeHolder.getId());
        }
    }
} else {
    // Dispatch the reboot through the VM work-job queue and wait for the outcome.
    final Outcome<VirtualMachine> outcome = rebootVmThroughJobQueue(vmUuid, params);
    try {
        final VirtualMachine vm = outcome.get();
    } catch (final InterruptedException e) {
        throw new RuntimeException("Operation is interrupted", e);
    } catch (final java.util.concurrent.ExecutionException e) {
        // NOTE(review): "Execution excetion" typo in this message — kept verbatim.
        throw new RuntimeException("Execution excetion", e);
    }
    // Re-throw any exception the job serialized as its result, narrowest type first.
    final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
    if (jobResult != null) {
        if (jobResult instanceof ResourceUnavailableException) {
            throw (ResourceUnavailableException)jobResult;
        } else if (jobResult instanceof ConcurrentOperationException) {
            throw (ConcurrentOperationException)jobResult;
        } else if (jobResult instanceof InsufficientCapacityException) {
            throw (InsufficientCapacityException)jobResult;
        } else if (jobResult instanceof RuntimeException) {
            throw (RuntimeException)jobResult;
        } else if (jobResult instanceof Throwable) {
            throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
        }
    }
}
}

/**
 * Sends a RebootCommand to the host currently running the VM. Refuses to reboot
 * while VM-snapshot tasks are active. A negative or missing answer is only logged;
 * an agent timeout is escalated as a CloudRuntimeException.
 */
private void orchestrateReboot(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    // If there are active VM-snapshot tasks, a state change is not allowed.
    if(_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())){
        s_logger.error("Unable to reboot VM " + vm + " due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
        throw new CloudRuntimeException("Unable to reboot VM " + vm + " due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
    }
    final DataCenter dc = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
    final Host host = _hostDao.findById(vm.getHostId());
    if (host == null) {
        // Should findById throw an Exception if the host is not found?
        throw new CloudRuntimeException("Unable to retrieve host with id " + vm.getHostId());
    }
    final Cluster cluster = _entityMgr.findById(Cluster.class, host.getClusterId());
    final Pod pod = _entityMgr.findById(Pod.class, host.getPodId());
    final DeployDestination dest = new DeployDestination(dc, pod, cluster, host);
    try {
        // Stop processing further commands in this batch if one errors out.
        final Commands cmds = new Commands(Command.OnError.Stop);
        cmds.addCommand(new RebootCommand(vm.getInstanceName(), getExecuteInSequence(vm.getHypervisorType())));
        _agentMgr.send(host.getId(), cmds);
        final Answer rebootAnswer = cmds.getAnswer(RebootAnswer.class);
        if (rebootAnswer != null && rebootAnswer.getResult()) {
            return;
        }
        s_logger.info("Unable to reboot VM " + vm + " on " + dest.getHost() + " due to " + (rebootAnswer == null ? " no reboot answer" : rebootAnswer.getDetails()));
    } catch (final OperationTimedoutException e) {
        s_logger.warn("Unable to send the reboot command to host " + dest.getHost() + " for the vm " + vm + " due to operation timeout", e);
        throw new CloudRuntimeException("Failed to reboot the vm on host " + dest.getHost());
    }
}

/**
 * Builds a StopCommand used to clean a VM up on a host, wired with the VM's
 * control-NIC IP so the agent can reach system VMs.
 */
public Command cleanup(final VirtualMachine vm) {
    StopCommand cmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
    cmd.setControlIp(getControlNicIpForVM(vm));
    return cmd;
}

/**
 * Returns the IP used to control the VM: the control NIC's IPv4 address for
 * console-proxy/secondary-storage VMs, the private IP for domain routers,
 * and null for everything else.
 */
private String getControlNicIpForVM(VirtualMachine vm) {
    if (vm.getType() == VirtualMachine.Type.ConsoleProxy || vm.getType() == VirtualMachine.Type.SecondaryStorageVm) {
        NicVO nic = _nicsDao.getControlNicForVM(vm.getId());
        return nic.getIPv4Address();
    } else if (vm.getType() == VirtualMachine.Type.DomainRouter) {
        return vm.getPrivateIpAddress();
    } else {
        return null;
    }
}

/**
 * Name-based variant of {@link #cleanup(VirtualMachine)}: looks the VM up by
 * instance name and builds the StopCommand from the name.
 */
public Command cleanup(final String vmName) {
    VirtualMachine vm = _vmDao.findVMByInstanceName(vmName);
    StopCommand cmd = new StopCommand(vmName, getExecuteInSequence(null), false);
    cmd.setControlIp(getControlNicIpForVM(vm));
    return cmd;
}

/**
 * XenServer-specific: reconciles the "platform" detail of user VMs with the
 * metadata reported by the cluster (instance name -> platform string).
 * (Method body continues on the following lines.)
 */
public void syncVMMetaData(final Map<String, String> vmMetadatum) {
    if (vmMetadatum == null || vmMetadatum.isEmpty()) {
        return;
    }
    // Bulk-load the current "platform" detail for all reported VM names in one query.
    List<Pair<Pair<String, VirtualMachine.Type>, Pair<Long, String>>> vmDetails = _userVmDao.getVmsDetailByNames(vmMetadatum.keySet(), "platform");
    for (final Map.Entry<String, String> entry : vmMetadatum.entrySet()) {
        final String name = entry.getKey();
        final String platform = entry.getValue();
        if (platform == null || platform.isEmpty()) {
            continue;
        }
        boolean found = false;
        for(Pair<Pair<String, VirtualMachine.Type>, Pair<Long, String>> vmDetail : vmDetails ) {
            Pair<String, VirtualMachine.Type> vmNameTypePair = vmDetail.first();
            if(vmNameTypePair.first().equals(name)) {
                found = true;
                if(vmNameTypePair.second() == VirtualMachine.Type.User) {
                    Pair<Long, String> detailPair = vmDetail.second();
                    String
platformDetail = detailPair.second();
                    // Skip the DB update when the stored platform already matches the report.
                    if (platformDetail != null && platformDetail.equals(platform)) {
                        break;
                    }
                    updateVmMetaData(detailPair.first(), platform);
                }
                break;
            }
        }
        if(!found) {
            // Name was not in the bulk-loaded details: fall back to a per-VM lookup.
            VMInstanceVO vm = _vmDao.findVMByInstanceName(name);
            if(vm != null && vm.getType() == VirtualMachine.Type.User) {
                updateVmMetaData(vm.getId(), platform);
            }
        }
    }
}

/**
 * XenServer-specific: persists the reported platform string on a user VM and
 * derives the matching PV-driver ("hypervisortoolsversion") detail from it.
 */
private void updateVmMetaData(Long vmId, String platform) {
    UserVmVO userVm = _userVmDao.findById(vmId);
    _userVmDao.loadDetails(userVm);
    // "timeoffset" is dropped; the reported platform becomes authoritative.
    if ( userVm.details.containsKey("timeoffset")) {
        userVm.details.remove("timeoffset");
    }
    userVm.setDetail("platform", platform);
    // A platform string containing "device_id" implies the newer XenServer 6.1 tools.
    String pvdriver = "xenserver56";
    if ( platform.contains("device_id")) {
        pvdriver = "xenserver61";
    }
    if (!userVm.details.containsKey("hypervisortoolsversion") || !userVm.details.get("hypervisortoolsversion").equals(pvdriver)) {
        userVm.setDetail("hypervisortoolsversion", pvdriver);
    }
    _userVmDao.saveDetails(userVm);
}

/** This listener wants to be invoked on a recurring basis. */
@Override
public boolean isRecurring() {
    return true;
}

/**
 * Agent-listener callback: consumes ClusterVMMetaDataSyncAnswer exactly once per
 * answer (guarded by isExecuted/setExecuted) and feeds it into syncVMMetaData.
 */
@Override
public boolean processAnswers(final long agentId, final long seq, final Answer[] answers) {
    for (final Answer answer : answers) {
        if ( answer instanceof ClusterVMMetaDataSyncAnswer) {
            final ClusterVMMetaDataSyncAnswer cvms = (ClusterVMMetaDataSyncAnswer)answer;
            if (!cvms.isExecuted()) {
                syncVMMetaData(cvms.getVMMetaDatum());
                cvms.setExecuted();
            }
        }
    }
    return true;
}

/** Timeouts are simply acknowledged; no recovery action is needed here. */
@Override
public boolean processTimeout(final long agentId, final long seq) {
    return true;
}

/** -1 means this listener uses the default/no specific timeout. */
@Override
public int getTimeout() {
    return -1;
}

/**
 * Agent-listener callback: on PingRoutingCommand, forwards the host's VM state
 * report to the sync manager and scans for VMs stuck in transitional states.
 */
@Override
public boolean processCommands(final long agentId, final long seq, final Command[] cmds) {
    boolean processed = false;
    for (final Command cmd : cmds) {
        if (cmd instanceof PingRoutingCommand) {
            final PingRoutingCommand ping = (PingRoutingCommand)cmd;
            if (ping.getHostVmStateReport() != null) {
                _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport());
            }
            // Take the chance to scan VMs that are stuck in transitional states
            // and are missing from the report.
            scanStalledVMInTransitionStateOnUpHost(agentId);
            processed = true;
        }
    }
    return processed;
}

/** No agent-control commands are handled by this listener. */
@Override
public AgentControlAnswer processControlCommand(final long agentId, final AgentControlCommand cmd) {
    return null;
}

/** Disconnects are acknowledged without any action here. */
@Override
public boolean processDisconnect(final long agentId, final Status state) {
    return true;
}

@Override
public void processHostAboutToBeRemoved(long hostId) {
}

@Override
public void processHostRemoved(long hostId, long clusterId) {
}

@Override
public void processHostAdded(long hostId) {
}

/**
 * Agent-listener callback for host connects: resets the host's VM sync state and,
 * for XenServer hosts (and not during rebalance), kicks off the recurring
 * cluster VM metadata sync.
 */
@Override
public void processConnect(final Host agent, final StartupCommand cmd, final boolean forRebalance) throws ConnectionException {
    if (!(cmd instanceof StartupRoutingCommand)) {
        return;
    }
    if(s_logger.isDebugEnabled()) {
        s_logger.debug("Received startup command from hypervisor host. host id: " + agent.getId());
    }
    _syncMgr.resetHostSyncState(agent.getId());
    if (forRebalance) {
        s_logger.debug("Not processing listener " + this + " as connect happens on rebalance process");
        return;
    }
    final Long clusterId = agent.getClusterId();
    final long agentId = agent.getId();
    if (agent.getHypervisorType() == HypervisorType.XenServer) {
        // Only for Xen: initiate the metadata-sync cron job on the cluster.
        final ClusterVMMetaDataSyncCommand syncVMMetaDataCmd = new ClusterVMMetaDataSyncCommand(ClusterVMMetaDataSyncInterval.value(), clusterId);
        try {
            final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this);
            s_logger.debug("Cluster VM metadata sync started with jobid " + seq_no);
        } catch (final AgentUnavailableException e) {
            s_logger.fatal("The Cluster VM metadata sync process failed for cluster id " + clusterId + " with ", e);
        }
    }
}

/**
 * Periodic task: finds VMs lingering in Starting/Stopping beyond the agent wait
 * window and hands them to the HA manager. Serialized via the "TransitionChecking"
 * global lock. (runInContext body continues on the following lines.)
 */
protected class TransitionTask extends ManagedContextRunnable {
    @Override
    protected void runInContext() {
        final GlobalLock lock = GlobalLock.getInternLock("TransitionChecking");
        if (lock == null) {
            s_logger.debug("Couldn't get the global lock");
            return;
        }
        if (!lock.lock(30)) {
            s_logger.debug("Couldn't lock the db");
            return;
        }
// ---- Continuation of TransitionTask.runInContext(): lock acquired on the preceding lines ----
try {
            scanStalledVMInTransitionStateOnDisconnectedHosts();
            // VMs stuck in Starting/Stopping longer than the agent wait window get HA-handled.
            final List<VMInstanceVO> instances = _vmDao.findVMInTransition(new Date(DateUtil.currentGMTTime().getTime() - AgentManager.Wait.value() * 1000), State.Starting, State.Stopping);
            for (final VMInstanceVO instance : instances) {
                final State state = instance.getState();
                if (state == State.Stopping) {
                    _haMgr.scheduleStop(instance, instance.getHostId(), WorkType.CheckStop);
                } else if (state == State.Starting) {
                    _haMgr.scheduleRestart(instance, true);
                }
            }
        } catch (final Exception e) {
            s_logger.warn("Caught the following exception on transition checking", e);
        } finally {
            lock.unlock();
        }
    }
}

/** Simple DAO passthrough: looks a VM instance up by id. */
@Override
public VMInstanceVO findById(final long vmId) {
    return _vmDao.findById(vmId);
}

/**
 * Validates that a VM may be upgraded to the given service offering; throws
 * InvalidParameterValueException describing the first violated precondition
 * (VM state, identical offering, local-vs-shared storage, system flag,
 * capacity, storage-tag subset). Returns normally when the upgrade is allowed.
 */
@Override
public void checkIfCanUpgrade(final VirtualMachine vmInstance, final ServiceOffering newServiceOffering) {
    if (newServiceOffering == null) {
        throw new InvalidParameterValueException("Invalid parameter, newServiceOffering can't be null");
    }
    // Check that the VM is stopped / running.
    if (!(vmInstance.getState().equals(State.Stopped) || vmInstance.getState().equals(State.Running))) {
        s_logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState());
        throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + " " + " in state " + vmInstance.getState() +
                "; make sure the virtual machine is stopped/running");
    }
    // Check if the service offering being upgraded to is what the VM is already running with
    // (dynamic offerings are exempt from this check).
    if (!newServiceOffering.isDynamic() && vmInstance.getServiceOfferingId() == newServiceOffering.getId()) {
        if (s_logger.isInfoEnabled()) {
            s_logger.info("Not upgrading vm " + vmInstance.toString() + " since it already has the requested " + "service offering (" + newServiceOffering.getName() + ")");
        }
        throw new InvalidParameterValueException("Not upgrading vm " + vmInstance.toString() + " since it already " + "has the requested service offering (" +
                newServiceOffering.getName() + ")");
    }
    final ServiceOfferingVO currentServiceOffering = _offeringDao.findByIdIncludingRemoved(vmInstance.getId(), vmInstance.getServiceOfferingId());
    // Check that the service offering being upgraded to has the same Guest IP type as the VM's current service offering
    // NOTE: With the new network refactoring in 2.2, we shouldn't need the check for same guest IP type anymore.
    /*
     * if (!currentServiceOffering.getGuestIpType().equals(newServiceOffering.getGuestIpType())) { String errorMsg =
     * "The service offering being upgraded to has a guest IP type: " + newServiceOffering.getGuestIpType(); errorMsg +=
     * ". Please select a service offering with the same guest IP type as the VM's current service offering (" +
     * currentServiceOffering.getGuestIpType() + ")."; throw new InvalidParameterValueException(errorMsg); }
     */
    // Check that the service offering being upgraded to has the same storage pool preference as the VM's current service
    // offering.
    if (currentServiceOffering.getUseLocalStorage() != newServiceOffering.getUseLocalStorage()) {
        throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() +
                ", cannot switch between local storage and shared storage service offerings.  Current offering " + "useLocalStorage=" +
                currentServiceOffering.getUseLocalStorage() + ", target offering useLocalStorage=" + newServiceOffering.getUseLocalStorage());
    }
    // If vm is a system vm, check if it is a system service offering; if yes return with error as it cannot be used for user vms.
    if (currentServiceOffering.getSystemUse() != newServiceOffering.getSystemUse()) {
        throw new InvalidParameterValueException("isSystem property is different for current service offering and new service offering");
    }
    // Check that there are enough resources to upgrade the service offering.
    if (!isVirtualMachineUpgradable(vmInstance, newServiceOffering)) {
        throw new InvalidParameterValueException("Unable to upgrade virtual machine, not enough resources available " + "for an offering of " +
                newServiceOffering.getCpu() + " cpu(s) at " + newServiceOffering.getSpeed() + " Mhz, and " + newServiceOffering.getRamSize() + " MB of memory");
    }
    // Check that the service offering being upgraded to has all the tags of the current service offering.
    final List<String> currentTags = StringUtils.csvTagsToList(currentServiceOffering.getTags());
    final List<String> newTags = StringUtils.csvTagsToList(newServiceOffering.getTags());
    if (!newTags.containsAll(currentTags)) {
        throw new InvalidParameterValueException("Unable to upgrade virtual machine; the current service offering " + " should have tags as subset of " +
                "the new service offering tags. Current service offering tags: " + currentTags + "; " + "new service " + "offering tags: " + newTags);
    }
}

/**
 * Persists a service-offering change on a VM row, refreshing the HA and
 * limit-CPU flags from the new offering.
 *
 * @return true when the DB update succeeded
 */
@Override
public boolean upgradeVmDb(final long vmId, final long serviceOfferingId) {
    final VMInstanceVO vmForUpdate = _vmDao.createForUpdate();
    vmForUpdate.setServiceOfferingId(serviceOfferingId);
    final ServiceOffering newSvcOff = _entityMgr.findById(ServiceOffering.class, serviceOfferingId);
    vmForUpdate.setHaEnabled(newSvcOff.getOfferHA());
    vmForUpdate.setLimitCpuUse(newSvcOff.getLimitCpuUse());
    vmForUpdate.setServiceOfferingId(newSvcOff.getId());
    return _vmDao.update(vmId, vmForUpdate);
}

/**
 * Adds a VM to a network, inline when already inside a VM work job or via the
 * work-job queue otherwise, returning the allocated NicProfile.
 * (Method body continues on the following lines.)
 */
@Override
public NicProfile addVmToNetwork(final VirtualMachine vm, final Network network, final NicProfile requested)
        throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        // Avoid re-entrance: orchestrate directly under a placeholder work record.
        VmWorkJobVO placeHolder = null;
        placeHolder = createPlaceHolderWork(vm.getId());
        try {
            return orchestrateAddVmToNetwork(vm, network, requested);
        } finally {
            if (placeHolder != null) {
                _workJobDao.expunge(placeHolder.getId());
            }
        }
    } else {
        final Outcome<VirtualMachine> outcome = addVmToNetworkThroughJobQueue(vm, network, requested);
        try {
            outcome.get();
        } catch (final InterruptedException e) {
            throw new RuntimeException("Operation is interrupted", e);
        } catch (final java.util.concurrent.ExecutionException e) {
            throw new RuntimeException("Execution exception", e);
        }
        // Unwrap the serialized job result: rethrow exceptions, return a NicProfile result.
        final Object jobException = _jobMgr.unmarshallResultObject(outcome.getJob());
        if (jobException != null) {
            if (jobException instanceof ResourceUnavailableException) {
                throw (ResourceUnavailableException)jobException;
            } else if (jobException instanceof ConcurrentOperationException) {
                throw (ConcurrentOperationException)jobException;
            } else if (jobException instanceof InsufficientCapacityException) {
                throw
// ---- Continuation of addVmToNetwork's job-result unwrapping from the preceding lines ----
(InsufficientCapacityException)jobException;
            } else if (jobException instanceof RuntimeException) {
                throw (RuntimeException)jobException;
            } else if (jobException instanceof Throwable) {
                throw new RuntimeException("Unexpected exception", (Throwable)jobException);
            } else if (jobException instanceof NicProfile) {
                // Success path: the job's "result object" is the allocated NicProfile.
                return (NicProfile)jobException;
            }
        }
        throw new RuntimeException("Unexpected job execution result");
    }
}

/**
 * Does the actual work of adding a VM to a network. For a Running VM this
 * allocates+prepares a nic and plugs it on the backend (rolling the nic back if
 * the plug fails); for a Stopped VM it only allocates the nic; any other state
 * is rejected with ResourceUnavailableException.
 *
 * @return the allocated NicProfile, or null when the backend plug failed
 */
private NicProfile orchestrateAddVmToNetwork(final VirtualMachine vm, final Network network, final NicProfile requested)
        throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
    final CallContext cctx = CallContext.current();
    s_logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested);
    final VMInstanceVO vmVO = _vmDao.findById(vm.getId());
    final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount());
    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
    final DataCenter dc = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
    final Host host = _hostDao.findById(vm.getHostId());
    final DeployDestination dest = new DeployDestination(dc, null, null, host);
    // Check vm state.
    if (vm.getState() == State.Running) {
        // 1) Allocate and prepare nic.
        final NicProfile nic = _networkMgr.createNicForVm(network, requested, context, vmProfile, true);
        // 2) Convert vmProfile to vmTO.
        final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
        final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
        // 3) Convert nicProfile to NicTO.
        final NicTO nicTO = toNicTO(nic, vmProfile.getVirtualMachine().getHypervisorType());
        // 4) Plug the nic to the vm.
        s_logger.debug("Plugging nic for vm " + vm + " in network " + network);
        boolean result = false;
        try {
            result = plugNic(network, nicTO, vmTO, context, dest);
            if (result) {
                s_logger.debug("Nic is plugged successfully for vm " + vm + " in network " + network + ". Vm  is a part of network now");
                final long isDefault = nic.isDefaultNic() ? 1 : 0;
                // Insert nic's Id into DB as resource_name.
                if(VirtualMachine.Type.User.equals(vmVO.getType())) {
                    // Log usage event for user Vms only.
                    UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmVO.getAccountId(), vmVO.getDataCenterId(), vmVO.getId(),
                            Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vmVO.getUuid(), vm.isDisplay());
                }
                return nic;
            } else {
                s_logger.warn("Failed to plug nic to the vm " + vm + " in network " + network);
                return null;
            }
        } finally {
            if (!result) {
                // Roll the allocation back when the backend plug did not succeed.
                s_logger.debug("Removing nic " + nic + " from vm " + vmProfile.getVirtualMachine() + " as nic plug failed on the backend");
                _networkMgr.removeNic(vmProfile, _nicsDao.findById(nic.getId()));
            }
        }
    } else if (vm.getState() == State.Stopped) {
        // 1) Allocate nic (no prepare/plug needed while stopped).
        return _networkMgr.createNicForVm(network, requested, context, vmProfile, false);
    } else {
        s_logger.warn("Unable to add vm " + vm + " to network  " + network);
        throw new ResourceUnavailableException("Unable to add vm " + vm + " to network, is not in the right state", DataCenter.class, vm.getDataCenterId());
    }
}

/**
 * Converts a NicProfile into the hypervisor-specific NicTO via the matching guru.
 */
@Override
public NicTO toNicTO(final NicProfile nic, final HypervisorType hypervisorType) {
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(hypervisorType);
    final NicTO nicTO = hvGuru.toNicTO(nic);
    return nicTO;
}

/**
 * Removes a nic from a VM, inline when already inside a VM work job or via the
 * work-job queue otherwise.
 *
 * @return true when the nic was unplugged/released/removed successfully
 */
@Override
public boolean removeNicFromVm(final VirtualMachine vm, final Nic nic) throws ConcurrentOperationException, ResourceUnavailableException {
    final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
    if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
        // Avoid re-entrance: orchestrate directly under a placeholder work record.
        VmWorkJobVO placeHolder = null;
        placeHolder = createPlaceHolderWork(vm.getId());
        try {
            return orchestrateRemoveNicFromVm(vm, nic);
        } finally {
            if (placeHolder != null) {
                _workJobDao.expunge(placeHolder.getId());
            }
        }
    } else {
        final Outcome<VirtualMachine> outcome = removeNicFromVmThroughJobQueue(vm, nic);
        try {
            outcome.get();
        } catch (final InterruptedException e) {
            throw new RuntimeException("Operation is interrupted", e);
        } catch (final java.util.concurrent.ExecutionException e) {
            // NOTE(review): "Execution excetion" typo in this message — kept verbatim.
            throw new RuntimeException("Execution excetion", e);
        }
        // Unwrap the serialized job result: rethrow exceptions, return the Boolean result.
        final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
        if (jobResult != null) {
            if (jobResult instanceof ResourceUnavailableException) {
                throw (ResourceUnavailableException)jobResult;
            } else if (jobResult instanceof ConcurrentOperationException) {
                throw (ConcurrentOperationException)jobResult;
            } else if (jobResult instanceof RuntimeException) {
                throw (RuntimeException)jobResult;
            } else if (jobResult instanceof Throwable) {
                throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
            } else if (jobResult instanceof Boolean) {
                return (Boolean)jobResult;
            }
        }
        throw new RuntimeException("Job failed with un-handled exception");
    }
}

/**
 * Does the actual nic removal: unplugs on the backend when the VM is Running
 * (rejecting any state other than Running/Stopped), then releases the nic,
 * removes it from the network, and expunges the DB row.
 *
 * @return true on success, false when the backend unplug failed
 */
private boolean orchestrateRemoveNicFromVm(final VirtualMachine vm, final Nic nic) throws ConcurrentOperationException, ResourceUnavailableException {
    final CallContext cctx = CallContext.current();
    final VMInstanceVO vmVO = _vmDao.findById(vm.getId());
    final NetworkVO network = _networkDao.findById(nic.getNetworkId());
    final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount());
    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
    final DataCenter dc = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
    final Host host = _hostDao.findById(vm.getHostId());
    final DeployDestination dest = new DeployDestination(dc, null, null, host);
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
    final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
    final NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(),
            _networkModel.getNetworkRate(network.getId(), vm.getId()),
            _networkModel.isSecurityGroupSupportedInNetwork(network),
            _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network));
    // 1) Unplug the nic.
    if (vm.getState() == State.Running) {
        final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
        s_logger.debug("Un-plugging nic " + nic + " for vm " + vm + " from network " + network);
        final boolean result = unplugNic(network, nicTO, vmTO, context, dest);
        if (result) {
            s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network);
            final long isDefault = nic.isDefaultNic() ? 1 : 0;
            UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(),
                    Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay());
        } else {
            s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
            return false;
        }
    } else if (vm.getState() != State.Stopped) {
        s_logger.warn("Unable to remove vm " + vm + " from network  " + network);
        throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId());
    }
    // 2) Release the nic.
    _networkMgr.releaseNic(vmProfile, nic);
    // NOTE(review): missing space before "for vm" in this debug message — kept verbatim.
    s_logger.debug("Successfully released nic " + nic + "for vm " + vm);
    // 3) Remove the nic.
    _networkMgr.removeNic(vmProfile, nic);
    _nicsDao.expunge(nic.getId());
    return true;
}

/**
 * Removes a VM from a network (optionally matching a specific broadcast URI).
 * Currently delegates straight to the orchestration method.
 */
@Override
@DB
public boolean removeVmFromNetwork(final VirtualMachine vm, final Network network, final URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException {
    // TODO will serialize on the VM object later to resolve operation conflicts
    return orchestrateRemoveVmFromNetwork(vm, network, broadcastUri);
}

/*
 * Worker for removeVmFromNetwork: locates the nic (by broadcast URI or network),
 * refuses to drop a user VM's default nic, and performs unplug/release/remove
 * under a row lock on the nic. (Signature and body continue on the following lines.)
 */
@DB
private
// ---- Completes the orchestrateRemoveVmFromNetwork declaration begun on the preceding line ----
boolean orchestrateRemoveVmFromNetwork(final VirtualMachine vm, final Network network, final URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException {
    final CallContext cctx = CallContext.current();
    final VMInstanceVO vmVO = _vmDao.findById(vm.getId());
    final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount());
    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
    final DataCenter dc = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
    final Host host = _hostDao.findById(vm.getHostId());
    final DeployDestination dest = new DeployDestination(dc, null, null, host);
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
    final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
    // Resolve the nic either by (network, vm, broadcastUri) or just by (vm, network).
    Nic nic = null;
    if (broadcastUri != null) {
        nic = _nicsDao.findByNetworkIdInstanceIdAndBroadcastUri(network.getId(), vm.getId(), broadcastUri.toString());
    } else {
        nic = _networkModel.getNicInNetwork(vm.getId(), network.getId());
    }
    if (nic == null) {
        s_logger.warn("Could not get a nic with " + network);
        return false;
    }
    // Don't delete the default NIC on a user VM.
    if (nic.isDefaultNic() && vm.getType() == VirtualMachine.Type.User) {
        s_logger.warn("Failed to remove nic from " + vm + " in " + network + ", nic is default.");
        throw new CloudRuntimeException("Failed to remove nic from " + vm + " in " + network + ", nic is default.");
    }
    // Lock on nic is needed here to serialize concurrent removals.
    final Nic lock = _nicsDao.acquireInLockTable(nic.getId());
    if (lock == null) {
        // Check if nic is still there. Return if it was released already.
        if (_nicsDao.findById(nic.getId()) == null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Not need to remove the vm " + vm + " from network " + network + " as the vm doesn't have nic in this network");
            }
            return true;
        }
        throw new ConcurrentOperationException("Unable to lock nic " + nic.getId());
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Lock is acquired for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network);
    }
    try {
        final NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(),
                _networkModel.getNetworkRate(network.getId(), vm.getId()),
                _networkModel.isSecurityGroupSupportedInNetwork(network),
                _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network));
        // 1) Unplug the nic.
        if (vm.getState() == State.Running) {
            final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
            s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network);
            final boolean result = unplugNic(network, nicTO, vmTO, context, dest);
            if (result) {
                s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network);
            } else {
                s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
                return false;
            }
        } else if (vm.getState() != State.Stopped) {
            s_logger.warn("Unable to remove vm " + vm + " from network  " + network);
            throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId());
        }
        // 2) Release the nic.
        _networkMgr.releaseNic(vmProfile, nic);
        // NOTE(review): missing space before "for vm" in this debug message — kept verbatim.
        s_logger.debug("Successfully released nic " + nic + "for vm " + vm);
        // 3) Remove the nic. (Unlike orchestrateRemoveNicFromVm, the nic row is not expunged here.)
        _networkMgr.removeNic(vmProfile, nic);
        return true;
    } finally {
        if (lock != null) {
            _nicsDao.releaseFromLockTable(lock.getId());
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Lock is released for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network);
            }
        }
    }
}

/**
 * Finds a destination for scaling a VM to a new service offering and migrates it
 * there. Plans within the current host's zone/pod/cluster while excluding the
 * current host (plus caller-supplied exclusions); propagates migration failures.
 *
 * @param excludes hosts to exclude from planning; the source host is added here
 * @throws InsufficientServerCapacityException when no suitable host is found
 */
@Override
public void findHostAndMigrate(final String vmUuid, final Long newSvcOfferingId, final ExcludeList excludes)
        throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    if (vm == null) {
        throw new CloudRuntimeException("Unable to find " + vmUuid);
    }
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final Long srcHostId = vm.getHostId();
    final Long oldSvcOfferingId = vm.getServiceOfferingId();
    if (srcHostId == null) {
        throw new CloudRuntimeException("Unable to scale the vm because it doesn't have a host id");
    }
    final Host host = _hostDao.findById(srcHostId);
    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, null, null);
    excludes.addHost(vm.getHostId());
    // Need to find the destination host based on the new svc offering.
    // NOTE(review): this mutates the in-memory VM's offering id before planning — the DB
    // offering is presumably updated elsewhere in the scale flow; confirm against callers.
    vm.setServiceOfferingId(newSvcOfferingId);
    DeployDestination dest = null;
    try {
        dest = _dpMgr.planDeployment(profile, plan, excludes, null);
    } catch (final AffinityConflictException e2) {
        s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
        throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
    }
    if (dest != null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug(" Found " + dest + " for scaling the vm to.");
        }
    }
    if (dest == null) {
        throw new InsufficientServerCapacityException("Unable to find a server to scale the vm to.", host.getClusterId());
    }
    excludes.addHost(dest.getHost().getId());
    try {
        migrateForScale(vm.getUuid(), srcHostId, dest, oldSvcOfferingId);
    } catch (final ResourceUnavailableException e) {
        s_logger.debug("Unable to migrate to unavailable " + dest);
        throw e;
    } catch (final ConcurrentOperationException e) {
        s_logger.debug("Unable to migrate VM due to: " + e.getMessage());
        throw e;
    }
}

// ---- Next method's declaration is truncated at the end of this chunk ----
@Override
public 
void migrateForScale(final String vmUuid, final long srcHostId, final DeployDestination dest, final Long oldSvcOfferingId) throws ResourceUnavailableException, ConcurrentOperationException { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { orchestrateMigrateForScale(vmUuid, srcHostId, dest, oldSvcOfferingId); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { final Outcome<VirtualMachine> outcome = migrateVmForScaleThroughJobQueue(vmUuid, srcHostId, dest, oldSvcOfferingId); try { final VirtualMachine vm = outcome.get(); } catch (final InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); } catch (final java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobResult != null) { if (jobResult instanceof ResourceUnavailableException) { throw (ResourceUnavailableException)jobResult; } else if (jobResult instanceof ConcurrentOperationException) { throw (ConcurrentOperationException)jobResult; } else if (jobResult instanceof RuntimeException) { throw (RuntimeException)jobResult; } else if (jobResult instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobResult); } } } } private void orchestrateMigrateForScale(final String vmUuid, final long srcHostId, final DeployDestination dest, final Long oldSvcOfferingId) throws ResourceUnavailableException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); s_logger.info("Migrating " + vm + " to " + dest); vm.getServiceOfferingId(); final long dstHostId = dest.getHost().getId(); final Host fromHost 
= _hostDao.findById(srcHostId); if (fromHost == null) { s_logger.info("Unable to find the host to migrate from: " + srcHostId); throw new CloudRuntimeException("Unable to find the host to migrate from: " + srcHostId); } if (fromHost.getClusterId().longValue() != dest.getCluster().getId()) { s_logger.info("Source and destination host are not in same cluster, unable to migrate to host: " + dstHostId); throw new CloudRuntimeException("Source and destination host are not in same cluster, unable to migrate to host: " + dest.getHost().getId()); } final VirtualMachineGuru vmGuru = getVmGuru(vm); final long vmId = vm.getId(); vm = _vmDao.findByUuid(vmUuid); if (vm == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Unable to find the vm " + vm); } throw new CloudRuntimeException("Unable to find a virtual machine with id " + vmId); } if (vm.getState() != State.Running) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM is not Running, unable to migrate the vm " + vm); } throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM_MIGRATE; if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE; } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY_MIGRATE; } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); _networkMgr.prepareNicForMigration(profile, dest); volumeMgr.prepareForMigration(profile, dest); final VirtualMachineTO to = toVmTO(profile); final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId()); work.setStep(Step.Prepare); work.setResourceType(ItWorkVO.ResourceType.Host); 
work.setResourceId(dstHostId); work = _workDao.persist(work); Answer pfma = null; try { pfma = _agentMgr.send(dstHostId, pfmc); if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? pfma.getDetails() : "null answer returned"; final String msg = "Unable to prepare for migration due to " + details; pfma = null; throw new AgentUnavailableException(msg, dstHostId); } } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); } finally { if (pfma == null) { work.setStep(Step.Done); _workDao.update(work.getId(), work); } } vm.setLastHostId(srcHostId); try { if (vm == null || vm.getHostId() == null || vm.getHostId() != srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { s_logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { s_logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } boolean migrated = false; try { final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType())); String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString()); boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence); mc.setAutoConvergence(kvmAutoConvergence); mc.setHostGuid(dest.getHost().getGuid()); try { final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); if (ma == null || !ma.getResult()) { final String details = ma != null ? 
ma.getDetails() : "null answer returned"; final String msg = "Unable to migrate due to " + details; s_logger.error(msg); throw new CloudRuntimeException(msg); } } catch (final OperationTimedoutException e) { if (e.isActive()) { s_logger.warn("Active migration command so scheduling a restart for " + vm); _haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); } try { final long newServiceOfferingId = vm.getServiceOfferingId(); vm.setServiceOfferingId(oldSvcOfferingId); // release capacity for the old service offering only if (!changeState(vm, VirtualMachine.Event.OperationSucceeded, dstHostId, work, Step.Started)) { throw new ConcurrentOperationException("Unable to change the state for " + vm); } vm.setServiceOfferingId(newServiceOfferingId); } catch (final NoTransitionException e1) { throw new ConcurrentOperationException("Unable to change state due to " + e1.getMessage()); } try { if (!checkVmOnHost(vm, dstHostId)) { s_logger.error("Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { s_logger.debug("Error while checking the vm " + vm + " on host " + dstHostId, e); } migrated = true; } finally { if (!migrated) { s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + dest.getPod().getName(), "Migrate Command failed. 
Please check logs."); try { _agentMgr.send(dstHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException ae) { s_logger.info("Looks like the destination Host is unavailable for cleanup"); } try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e) { s_logger.warn(e.getMessage()); } } work.setStep(Step.Done); _workDao.update(work.getId(), work); } } @Override public boolean replugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { boolean result = true; final VMInstanceVO router = _vmDao.findById(vm.getId()); if (router.getState() == State.Running) { try { final ReplugNicCommand replugNicCmd = new ReplugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails()); final Commands cmds = new Commands(Command.OnError.Stop); cmds.addCommand("replugnic", replugNicCmd); _agentMgr.send(dest.getHost().getId(), cmds); final ReplugNicAnswer replugNicAnswer = cmds.getAnswer(ReplugNicAnswer.class); if (replugNicAnswer == null || !replugNicAnswer.getResult()) { s_logger.warn("Unable to replug nic for vm " + vm.getName()); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e); } } else { s_logger.warn("Unable to apply ReplugNic, vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply ReplugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId()); } return result; } public boolean plugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest) throws ConcurrentOperationException, 
ResourceUnavailableException, InsufficientCapacityException { boolean result = true; final VMInstanceVO router = _vmDao.findById(vm.getId()); if (router.getState() == State.Running) { try { final PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails()); final Commands cmds = new Commands(Command.OnError.Stop); cmds.addCommand("plugnic", plugNicCmd); _agentMgr.send(dest.getHost().getId(), cmds); final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (plugNicAnswer == null || !plugNicAnswer.getResult()) { s_logger.warn("Unable to plug nic for vm " + vm.getName()); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e); } } else { s_logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply PlugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId()); } return result; } public boolean unplugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest) throws ConcurrentOperationException, ResourceUnavailableException { boolean result = true; final VMInstanceVO router = _vmDao.findById(vm.getId()); if (router.getState() == State.Running) { // collect vm network statistics before unplug a nic UserVmVO userVm = _userVmDao.findById(vm.getId()); if (userVm != null && userVm.getType() == VirtualMachine.Type.User) { _userVmService.collectVmNetworkStatistics(userVm); } try { final Commands cmds = new Commands(Command.OnError.Stop); final UnPlugNicCommand unplugNicCmd = new UnPlugNicCommand(nic, vm.getName()); cmds.addCommand("unplugnic", unplugNicCmd); _agentMgr.send(dest.getHost().getId(), cmds); final UnPlugNicAnswer unplugNicAnswer = 
cmds.getAnswer(UnPlugNicAnswer.class); if (unplugNicAnswer == null || !unplugNicAnswer.getResult()) { s_logger.warn("Unable to unplug nic from router " + router); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to unplug nic from rotuer " + router + " from network " + network, dest.getHost().getId(), e); } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { s_logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend"); } else { s_logger.warn("Unable to apply unplug nic, Vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply unplug nic on the backend," + " vm " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); } return result; } @Override public VMInstanceVO reConfigureVm(final String vmUuid, final ServiceOffering oldServiceOffering, final boolean reconfiguringOnExistingHost) throws ResourceUnavailableException, InsufficientServerCapacityException, ConcurrentOperationException { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { return orchestrateReConfigureVm(vmUuid, oldServiceOffering, reconfiguringOnExistingHost); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { final Outcome<VirtualMachine> outcome = reconfigureVmThroughJobQueue(vmUuid, oldServiceOffering, reconfiguringOnExistingHost); VirtualMachine vm = null; try { vm = outcome.get(); } catch (final InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); } catch (final 
java.util.concurrent.ExecutionException e) { throw new RuntimeException("Execution excetion", e); } final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); if (jobResult != null) { if (jobResult instanceof ResourceUnavailableException) { throw (ResourceUnavailableException)jobResult; } else if (jobResult instanceof ConcurrentOperationException) { throw (ConcurrentOperationException)jobResult; } else if (jobResult instanceof InsufficientServerCapacityException) { throw (InsufficientServerCapacityException)jobResult; } else if (jobResult instanceof Throwable) { s_logger.error("Unhandled exception", (Throwable)jobResult); throw new RuntimeException("Unhandled exception", (Throwable)jobResult); } } return (VMInstanceVO)vm; } } private VMInstanceVO orchestrateReConfigureVm(final String vmUuid, final ServiceOffering oldServiceOffering, final boolean reconfiguringOnExistingHost) throws ResourceUnavailableException, ConcurrentOperationException { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final long newServiceofferingId = vm.getServiceOfferingId(); final ServiceOffering newServiceOffering = _offeringDao.findById(vm.getId(), newServiceofferingId); final HostVO hostVo = _hostDao.findById(vm.getHostId()); final Float memoryOvercommitRatio = CapacityManager.MemOverprovisioningFactor.valueIn(hostVo.getClusterId()); final Float cpuOvercommitRatio = CapacityManager.CpuOverprovisioningFactor.valueIn(hostVo.getClusterId()); final long minMemory = (long)(newServiceOffering.getRamSize() / memoryOvercommitRatio); final ScaleVmCommand reconfigureCmd = new ScaleVmCommand(vm.getInstanceName(), newServiceOffering.getCpu(), (int)(newServiceOffering.getSpeed() / cpuOvercommitRatio), newServiceOffering.getSpeed(), minMemory * 1024L * 1024L, newServiceOffering.getRamSize() * 1024L * 1024L, newServiceOffering.getLimitCpuUse()); final Long dstHostId = vm.getHostId(); if(vm.getHypervisorType().equals(HypervisorType.VMware)) { final HypervisorGuru hvGuru = 
_hvGuruMgr.getGuru(vm.getHypervisorType()); Map<String, String> details = null; details = hvGuru.getClusterSettings(vm.getId()); reconfigureCmd.getVirtualMachine().setDetails(details); } final ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Running, vm.getType(), vm.getId()); work.setStep(Step.Prepare); work.setResourceType(ItWorkVO.ResourceType.Host); work.setResourceId(vm.getHostId()); _workDao.persist(work); boolean success = false; try { if (reconfiguringOnExistingHost) { vm.setServiceOfferingId(oldServiceOffering.getId()); _capacityMgr.releaseVmCapacity(vm, false, false, vm.getHostId()); //release the old capacity vm.setServiceOfferingId(newServiceofferingId); _capacityMgr.allocateVmCapacity(vm, false); // lock the new capacity } final Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), reconfigureCmd); if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) { s_logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? 
"" : reconfigureAnswer.getDetails())); } success = true; } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Operation timed out on reconfiguring " + vm, dstHostId); } catch (final AgentUnavailableException e) { throw e; } finally { if (!success) { _capacityMgr.releaseVmCapacity(vm, false, false, vm.getHostId()); // release the new capacity vm.setServiceOfferingId(oldServiceOffering.getId()); _capacityMgr.allocateVmCapacity(vm, false); // allocate the old capacity } } return vm; } @Override public String getConfigComponentName() { return VirtualMachineManager.class.getSimpleName(); } @Override public ConfigKey<?>[] getConfigKeys() { return new ConfigKey<?>[] {ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, HaVmRestartHostUp}; } public List<StoragePoolAllocator> getStoragePoolAllocators() { return _storagePoolAllocators; } @Inject public void setStoragePoolAllocators(final List<StoragePoolAllocator> storagePoolAllocators) { _storagePoolAllocators = storagePoolAllocators; } // // PowerState report handling for out-of-band changes and handling of left-over transitional VM states // @MessageHandler(topic = Topics.VM_POWER_STATE) protected void HandlePowerStateReport(final String subject, final String senderAddress, final Object args) { assert args != null; final Long vmId = (Long)args; final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vmId); if (pendingWorkJobs.size() == 0 && !_haMgr.hasPendingHaWork(vmId)) { // there is no pending operation job final VMInstanceVO vm = _vmDao.findById(vmId); if (vm != null) { switch (vm.getPowerState()) { case PowerOn: handlePowerOnReportWithNoPendingJobsOnVM(vm); break; case PowerOff: case PowerReportMissing: 
handlePowerOffReportWithNoPendingJobsOnVM(vm); break; // PowerUnknown shouldn't be reported, it is a derived // VM power state from host state (host un-reachable) case PowerUnknown: default: assert false; break; } } else { s_logger.warn("VM " + vmId + " no longer exists when processing VM state report"); } } else { s_logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); // reset VM power state tracking so that we won't lost signal when VM has // been translated to _vmDao.resetVmPowerStateTracking(vmId); } } private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { // // 1) handle left-over transitional VM states // 2) handle out of band VM live migration // 3) handle out of sync stationary states, marking VM from Stopped to Running with // alert messages // switch (vm.getState()) { case Starting: s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { s_logger.warn("Unexpected VM state transition exception, race-condition?", e); } s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); // we need to alert admin or user about this risky state transition _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (Starting -> Running) from out-of-context transition. 
VM network environment may need to be reset"); break; case Running: try { if (vm.getHostId() != null && vm.getHostId().longValue() != vm.getPowerHostId().longValue()) { s_logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { s_logger.warn("Unexpected VM state transition exception, race-condition?", e); } break; case Stopping: case Stopped: s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { s_logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() + " -> Running) from out-of-context transition. VM network environment may need to be reset"); s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Destroyed: case Expunging: s_logger.info("Receive power on report when VM is in destroyed or expunging state. 
vm: " + vm.getId() + ", state: " + vm.getState()); break; case Migrating: s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { s_logger.warn("Unexpected VM state transition exception, race-condition?", e); } s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Error: default: s_logger.info("Receive power on report when VM is in error or unexpected state. vm: " + vm.getId() + ", state: " + vm.getState()); break; } } private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { // 1) handle left-over transitional VM states // 2) handle out of sync stationary states, schedule force-stop to release resources // switch (vm.getState()) { case Starting: case Stopping: case Running: case Stopped: case Migrating: s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-off report while there is no pending jobs on it"); if(vm.isHaEnabled() && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { s_logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); if(!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { s_logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); } return; } final VirtualMachineGuru vmGuru = getVmGuru(vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); if (!sendStop(vmGuru, profile, true, true)) { // In case StopCommand fails, don't proceed further return; } try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOffReport, 
null); } catch (final NoTransitionException e) { s_logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() + " -> Stopped) from out-of-context transition."); s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); break; case Destroyed: case Expunging: break; case Error: default: break; } } private void scanStalledVMInTransitionStateOnUpHost(final long hostId) { // // Check VM that is stuck in Starting, Stopping, Migrating states, we won't check // VMs in expunging state (this need to be handled specially) // // checking condition // 1) no pending VmWork job // 2) on hostId host and host is UP // // When host is UP, soon or later we will get a report from the host about the VM, // however, if VM is missing from the host report (it may happen in out of band changes // or from designed behave of XS/KVM), the VM may not get a chance to run the state-sync logic // // Therefore, we will scan thoses VMs on UP host based on last update timestamp, if the host is UP // and a VM stalls for status update, we will consider them to be powered off // (which is relatively safe to do so) final long stallThresholdInMs = VmJobStateReportInterval.value() + (VmJobStateReportInterval.value() >> 1); final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs); final List<Long> mostlikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostId, cutTime); for (final Long vmId : mostlikelyStoppedVMs) { final VMInstanceVO vm = _vmDao.findById(vmId); assert vm != null; handlePowerOffReportWithNoPendingJobsOnVM(vm); } final List<Long> vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostId, cutTime); for (final Long vmId : 
vmsWithRecentReport) { final VMInstanceVO vm = _vmDao.findById(vmId); assert vm != null; if (vm.getPowerState() == PowerState.PowerOn) { handlePowerOnReportWithNoPendingJobsOnVM(vm); } else { handlePowerOffReportWithNoPendingJobsOnVM(vm); } } } private void scanStalledVMInTransitionStateOnDisconnectedHosts() { final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - VmOpWaitInterval.value() * 1000); final List<Long> stuckAndUncontrollableVMs = listStalledVMInTransitionStateOnDisconnectedHosts(cutTime); for (final Long vmId : stuckAndUncontrollableVMs) { final VMInstanceVO vm = _vmDao.findById(vmId); // We now only alert administrator about this situation _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") is stuck in " + vm.getState() + " state and its host is unreachable for too long"); } } // VMs that in transitional state without recent power state report private List<Long> listStalledVMInTransitionStateOnUpHost(final long hostId, final Date cutTime) { final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + "AND h.id = ? AND i.power_state_update_time < ? 
AND i.host_id = h.id " + "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + "AND i.removed IS NULL"; final List<Long> l = new ArrayList<Long>(); TransactionLegacy txn = null; try { txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, hostId); pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime)); pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); final ResultSet rs = pstmt.executeQuery(); while (rs.next()) { l.add(rs.getLong(1)); } } catch (final SQLException e) { } catch (final Throwable e) { } } finally { if (txn != null) { txn.close(); } } return l; } // VMs that in transitional state and recently have power state update private List<Long> listVMInTransitionStateWithRecentReportOnUpHost(final long hostId, final Date cutTime) { final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + "AND h.id = ? AND i.power_state_update_time > ? 
AND i.host_id = h.id " + "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + "AND i.removed IS NULL"; final List<Long> l = new ArrayList<Long>(); TransactionLegacy txn = null; try { txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, hostId); pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime)); pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); final ResultSet rs = pstmt.executeQuery(); while (rs.next()) { l.add(rs.getLong(1)); } } catch (final SQLException e) { } catch (final Throwable e) { } return l; } finally { if (txn != null) { txn.close(); } } } private List<Long> listStalledVMInTransitionStateOnDisconnectedHosts(final Date cutTime) { final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status != 'UP' " + "AND i.power_state_update_time < ? 
AND i.host_id = h.id " + "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + "AND i.removed IS NULL"; final List<Long> l = new ArrayList<Long>(); TransactionLegacy txn = null; try { txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime)); pstmt.setInt(2, JobInfo.Status.IN_PROGRESS.ordinal()); final ResultSet rs = pstmt.executeQuery(); while (rs.next()) { l.add(rs.getLong(1)); } } catch (final SQLException e) { } catch (final Throwable e) { } return l; } finally { if (txn != null) { txn.close(); } } } // // VM operation based on new sync model // public class VmStateSyncOutcome extends OutcomeImpl<VirtualMachine> { private long _vmId; public VmStateSyncOutcome(final AsyncJob job, final PowerState desiredPowerState, final long vmId, final Long srcHostIdForMigration) { super(VirtualMachine.class, job, VmJobCheckInterval.value(), new Predicate() { @Override public boolean checkCondition() { final AsyncJobVO jobVo = _entityMgr.findById(AsyncJobVO.class, job.getId()); assert jobVo != null; if (jobVo == null || jobVo.getStatus() != JobInfo.Status.IN_PROGRESS) { return true; } return false; } }, Topics.VM_POWER_STATE, AsyncJob.Topics.JOB_STATE); _vmId = vmId; } @Override protected VirtualMachine retrieve() { return _vmDao.findById(_vmId); } } public class VmJobVirtualMachineOutcome extends OutcomeImpl<VirtualMachine> { private long _vmId; public VmJobVirtualMachineOutcome(final AsyncJob job, final long vmId) { super(VirtualMachine.class, job, VmJobCheckInterval.value(), new Predicate() { @Override public boolean checkCondition() { final AsyncJobVO jobVo = _entityMgr.findById(AsyncJobVO.class, job.getId()); assert jobVo != null; if (jobVo == null || 
jobVo.getStatus() != JobInfo.Status.IN_PROGRESS) { return true; } return false; } }, AsyncJob.Topics.JOB_STATE); _vmId = vmId; } @Override protected VirtualMachine retrieve() { return _vmDao.findById(_vmId); } } // // TODO build a common pattern to reduce code duplication in following methods // no time for this at current iteration // public Outcome<VirtualMachine> startVmThroughJobQueue(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) { final CallContext context = CallContext.current(); final User callingUser = context.getCallingUser(); final Account callingAccount = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); VmWorkJobVO workJob = null; final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId(), VmWorkStart.class.getName()); if (pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkStart.class.getName()); workJob.setAccountId(callingAccount.getId()); workJob.setUserId(callingUser.getId()); workJob.setStep(VmWorkJobVO.Step.Starting); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkStart workInfo = new VmWorkStart(callingUser.getId(), callingAccount.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER); workInfo.setPlan(planToDeploy); workInfo.setParams(params); if (planner != null) { workInfo.setDeploymentPlanner(planner.getName()); } workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } 
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmStateSyncOutcome(workJob, VirtualMachine.PowerState.PowerOn, vm.getId(), null); } public Outcome<VirtualMachine> stopVmThroughJobQueue(final String vmUuid, final boolean cleanup) { final CallContext context = CallContext.current(); final Account account = context.getCallingAccount(); final User user = context.getCallingUser(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( vm.getType(), vm.getId(), VmWorkStop.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkStop.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setStep(VmWorkJobVO.Step.Prepare); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkStop workInfo = new VmWorkStop(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, cleanup); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmStateSyncOutcome(workJob, VirtualMachine.PowerState.PowerOff, vm.getId(), null); } public Outcome<VirtualMachine> rebootVmThroughJobQueue(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params) { final CallContext context = CallContext.current(); final Account account = context.getCallingAccount(); final User user = context.getCallingUser(); final 
VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkReboot.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkReboot.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setStep(VmWorkJobVO.Step.Prepare); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkReboot workInfo = new VmWorkReboot(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, params); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> migrateVmThroughJobQueue(final String vmUuid, final long srcHostId, final DeployDestination dest) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrate.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); 
workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrate.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkMigrate workInfo = new VmWorkMigrate(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmStateSyncOutcome(workJob, VirtualMachine.PowerState.PowerOn, vm.getId(), vm.getPowerHostId()); } public Outcome<VirtualMachine> migrateVmAwayThroughJobQueue(final String vmUuid, final long srcHostId) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrateAway.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrateAway.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkMigrateAway workInfo = new 
VmWorkMigrateAway(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); } _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmStateSyncOutcome(workJob, VirtualMachine.PowerState.PowerOn, vm.getId(), vm.getPowerHostId()); } public Outcome<VirtualMachine> migrateVmWithStorageThroughJobQueue( final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrateWithStorage.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrateWithStorage.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkMigrateWithStorage workInfo = new VmWorkMigrateWithStorage(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, destHostId, volumeToPool); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } 
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmStateSyncOutcome(workJob, VirtualMachine.PowerState.PowerOn, vm.getId(), destHostId); } public Outcome<VirtualMachine> migrateVmForScaleThroughJobQueue( final String vmUuid, final long srcHostId, final DeployDestination dest, final Long newSvcOfferingId) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrateForScale.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrateForScale.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkMigrateForScale workInfo = new VmWorkMigrateForScale(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest, newSvcOfferingId); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> migrateVmStorageThroughJobQueue( final String vmUuid, final StoragePool destPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); 
final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkStorageMigration.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkStorageMigration.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkStorageMigration workInfo = new VmWorkStorageMigration(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool.getId()); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> addVmToNetworkThroughJobQueue( final VirtualMachine vm, final Network network, final NicProfile requested) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkAddVmToNetwork.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); 
workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkAddVmToNetwork.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkAddVmToNetwork workInfo = new VmWorkAddVmToNetwork(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, network.getId(), requested); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> removeNicFromVmThroughJobQueue( final VirtualMachine vm, final Nic nic) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkRemoveNicFromVm.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkRemoveNicFromVm.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkRemoveNicFromVm workInfo = new VmWorkRemoveNicFromVm(user.getId(), account.getId(), vm.getId(), 
VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, nic.getId()); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> removeVmFromNetworkThroughJobQueue( final VirtualMachine vm, final Network network, final URI broadcastUri) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkRemoveVmFromNetwork.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkRemoveVmFromNetwork.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkRemoveVmFromNetwork workInfo = new VmWorkRemoveVmFromNetwork(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, network, broadcastUri); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } public Outcome<VirtualMachine> reconfigureVmThroughJobQueue( final String vmUuid, final ServiceOffering newServiceOffering, 
final boolean reconfiguringOnExistingHost) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs( VirtualMachine.Type.Instance, vm.getId(), VmWorkReconfigure.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert pendingWorkJobs.size() == 1; workJob = pendingWorkJobs.get(0); } else { workJob = new VmWorkJobVO(context.getContextId()); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkReconfigure.class.getName()); workJob.setAccountId(account.getId()); workJob.setUserId(user.getId()); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(vm.getId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); // save work context info (there are some duplications) final VmWorkReconfigure workInfo = new VmWorkReconfigure(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, newServiceOffering.getId(), reconfiguringOnExistingHost); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); } AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); return new VmJobVirtualMachineOutcome(workJob, vm.getId()); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateStart(final VmWorkStart work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; try{ orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); } catch (CloudRuntimeException e){ e.printStackTrace(); s_logger.info("Caught 
CloudRuntimeException, returning job failed " + e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance"); return new Pair<JobInfo.Status, String>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); } return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateStop(final VmWorkStop work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); throw new CloudRuntimeException("Unable to find VM id=" + work.getVmId()); } orchestrateStop(vm.getUuid(), work.isCleanup()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateMigrate(final VmWorkMigrate work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateMigrate(vm.getUuid(), work.getSrcHostId(), work.getDeployDestination()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateMigrateAway(final VmWorkMigrateAway work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { s_logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner"); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> 
orchestrateMigrateWithStorage(final VmWorkMigrateWithStorage work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateMigrateWithStorage(vm.getUuid(), work.getSrcHostId(), work.getDestHostId(), work.getVolumeToPool()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateMigrateForScale(final VmWorkMigrateForScale work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateMigrateForScale(vm.getUuid(), work.getSrcHostId(), work.getDeployDestination(), work.getNewServiceOfferringId()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateReboot(final VmWorkReboot work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; orchestrateReboot(vm.getUuid(), work.getParams()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateAddVmToNetwork(final VmWorkAddVmToNetwork work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final Network network = _networkDao.findById(work.getNetworkId()); final NicProfile nic = orchestrateAddVmToNetwork(vm, network, work.getRequestedNicProfile()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(nic)); } @ReflectionUse private Pair<JobInfo.Status, String> 
orchestrateRemoveNicFromVm(final VmWorkRemoveNicFromVm work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final NicVO nic = _entityMgr.findById(NicVO.class, work.getNicId()); final boolean result = orchestrateRemoveNicFromVm(vm, nic); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(result)); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateRemoveVmFromNetwork(final VmWorkRemoveVmFromNetwork work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final boolean result = orchestrateRemoveVmFromNetwork(vm, work.getNetwork(), work.getBroadcastUri()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(result)); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateReconfigure(final VmWorkReconfigure work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final ServiceOffering newServiceOffering = _offeringDao.findById(vm.getId(), work.getNewServiceOfferingId()); reConfigureVm(vm.getUuid(), newServiceOffering, work.isSameHost()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @ReflectionUse private Pair<JobInfo.Status, String> orchestrateStorageMigration(final VmWorkStorageMigration work) throws Exception { final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; final StoragePool pool = (PrimaryDataStoreInfo)dataStoreMgr.getPrimaryDataStore(work.getDestStoragePoolId()); 
orchestrateStorageMigration(vm.getUuid(), pool); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); } @Override public Pair<JobInfo.Status, String> handleVmWorkJob(final VmWork work) throws Exception { return _jobHandlerProxy.handleVmWorkJob(work); } private VmWorkJobVO createPlaceHolderWork(final long instanceId) { final VmWorkJobVO workJob = new VmWorkJobVO(""); workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_PLACEHOLDER); workJob.setCmd(""); workJob.setCmdInfo(""); workJob.setAccountId(0); workJob.setUserId(0); workJob.setStep(VmWorkJobVO.Step.Starting); workJob.setVmType(VirtualMachine.Type.Instance); workJob.setVmInstanceId(instanceId); workJob.setInitMsid(ManagementServerNode.getManagementServerId()); _workJobDao.persist(workJob); return workJob; } }
3e11a3c677683de0b20d5aee1b4e258ed217ed9a
13,126
java
Java
modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java
brat-kuzma/ignite
d7148828f947728a5b46f4f39f36eca8b0e6e521
[ "CC0-1.0" ]
4,339
2015-08-21T21:13:25.000Z
2022-03-30T09:56:44.000Z
modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java
brat-kuzma/ignite
d7148828f947728a5b46f4f39f36eca8b0e6e521
[ "CC0-1.0" ]
1,933
2015-08-24T11:37:40.000Z
2022-03-31T08:37:08.000Z
modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java
brat-kuzma/ignite
d7148828f947728a5b46f4f39f36eca8b0e6e521
[ "CC0-1.0" ]
2,140
2015-08-21T22:09:00.000Z
2022-03-25T07:57:34.000Z
41.537975
133
0.636294
7,444
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal.commandline;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.ignite.internal.client.GridClient;
import org.apache.ignite.internal.client.GridClientConfiguration;
import org.apache.ignite.internal.client.GridClientNode;
import org.apache.ignite.internal.commandline.argument.CommandArgUtils;
import org.apache.ignite.internal.commandline.baseline.AutoAdjustCommandArg;
import org.apache.ignite.internal.commandline.baseline.BaselineArguments;
import org.apache.ignite.internal.commandline.baseline.BaselineSubcommands;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.visor.baseline.VisorBaselineAutoAdjustSettings;
import org.apache.ignite.internal.visor.baseline.VisorBaselineNode;
import org.apache.ignite.internal.visor.baseline.VisorBaselineTask;
import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg;
import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult;
import org.apache.ignite.internal.visor.util.VisorTaskUtils;

import static java.lang.Boolean.TRUE;
import static java.util.Collections.singletonMap;
import static org.apache.ignite.internal.commandline.CommandHandler.DELIM;
import static org.apache.ignite.internal.commandline.CommandList.BASELINE;
import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT;
import static org.apache.ignite.internal.commandline.CommandLogger.optional;
import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION;
import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode;
import static org.apache.ignite.internal.commandline.baseline.BaselineSubcommands.of;

/**
 * Commands associated with baseline functionality.
 */
public class BaselineCommand extends AbstractCommand<BaselineArguments> {
    /** Arguments. */
    private BaselineArguments baselineArgs;

    /**
     * When {@code true}, print the full list of node addresses.
     * NOTE(review): this field was referenced by {@link #baselinePrint0} without a visible
     * declaration; printUsage documents a {@code --verbose} flag but no parsing of it is
     * visible in {@link #parseArguments} — TODO wire the flag up / confirm against CommandHandler.
     */
    private boolean verbose;

    /** {@inheritDoc} */
    @Override public void printUsage(Logger logger) {
        final String constistIds = "consistentId1[,consistentId2,....,consistentIdN]";

        usage(logger, "Print cluster baseline topology:", BASELINE,
            singletonMap("verbose", "Show the full list of node ips."), optional("--verbose"));
        usage(logger, "Add nodes into baseline topology:", BASELINE, BaselineSubcommands.ADD.text(),
            constistIds, optional(CMD_AUTO_CONFIRMATION));
        usage(logger, "Remove nodes from baseline topology:", BASELINE, BaselineSubcommands.REMOVE.text(),
            constistIds, optional(CMD_AUTO_CONFIRMATION));
        usage(logger, "Set baseline topology:", BASELINE, BaselineSubcommands.SET.text(),
            constistIds, optional(CMD_AUTO_CONFIRMATION));
        usage(logger, "Set baseline topology based on version:", BASELINE,
            BaselineSubcommands.VERSION.text() + " topologyVersion", optional(CMD_AUTO_CONFIRMATION));
        usage(logger, "Set baseline autoadjustment settings:", BASELINE, BaselineSubcommands.AUTO_ADJUST.text(),
            "[disable|enable] [timeout <timeoutMillis>]", optional(CMD_AUTO_CONFIRMATION));
    }

    /** {@inheritDoc} */
    @Override public String confirmationPrompt() {
        // Every subcommand except plain COLLECT mutates the baseline and needs confirmation.
        if (baselineArgs != null && BaselineSubcommands.COLLECT != baselineArgs.getCmd())
            return "Warning: the command will perform changes in baseline.";

        return null;
    }

    /**
     * Change baseline.
     *
     * @param clientCfg Client configuration.
     * @throws Exception If failed to execute baseline action.
     */
    @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception {
        try (GridClient client = Command.startClient(clientCfg)) {
            UUID coordinatorId = client.compute()
                //Only non client node can be coordinator.
                .nodes(node -> !node.isClient())
                .stream()
                .min(Comparator.comparingLong(GridClientNode::order))
                .map(GridClientNode::nodeId)
                .orElse(null);

            VisorBaselineTaskResult res = executeTaskByNameOnNode(
                client,
                VisorBaselineTask.class.getName(),
                toVisorArguments(baselineArgs),
                coordinatorId,
                clientCfg
            );

            baselinePrint0(res, logger);
        }
        catch (Throwable e) {
            // Boundary catch: report the failing subcommand, then rethrow for the handler.
            logger.severe("Failed to execute baseline command='" + baselineArgs.getCmd().text() + "'");
            logger.severe(CommandLogger.errorMessage(e));

            throw e;
        }

        return null;
    }

    /** {@inheritDoc} */
    @Override public BaselineArguments arg() {
        return baselineArgs;
    }

    /**
     * Prepare task argument.
     *
     * @param args Argument from command line.
     * @return Task argument.
     */
    private VisorBaselineTaskArg toVisorArguments(BaselineArguments args) {
        VisorBaselineAutoAdjustSettings settings = args.getCmd() == BaselineSubcommands.AUTO_ADJUST
            ? new VisorBaselineAutoAdjustSettings(args.getEnableAutoAdjust(), args.getSoftBaselineTimeout())
            : null;

        return new VisorBaselineTaskArg(args.getCmd().visorBaselineOperation(), args.getTopVer(), args.getConsistentIds(), settings);
    }

    /**
     * Print baseline topology.
     *
     * Fix: the auto-adjust progress report previously dereferenced {@code autoAdjustSettings}
     * outside the null check (NPE when the coordinator returns no settings, e.g. an older
     * version); it now lives inside the guard and uses the null-safe accessor.
     *
     * @param res Task result with baseline topology.
     */
    private void baselinePrint0(VisorBaselineTaskResult res, Logger logger) {
        logger.info("Cluster state: " + (res.isActive() ? "active" : "inactive"));
        logger.info("Current topology version: " + res.getTopologyVersion());

        VisorBaselineAutoAdjustSettings autoAdjustSettings = res.getAutoAdjustSettings();

        if (autoAdjustSettings != null) {
            logger.info("Baseline auto adjustment " + (TRUE.equals(autoAdjustSettings.getEnabled()) ? "enabled" : "disabled") +
                ": softTimeout=" + autoAdjustSettings.getSoftTimeout()
            );

            if (TRUE.equals(autoAdjustSettings.getEnabled())) {
                if (res.isBaselineAdjustInProgress())
                    logger.info("Baseline auto-adjust is in progress");
                else if (res.getRemainingTimeToBaselineAdjust() < 0)
                    logger.info("Baseline auto-adjust are not scheduled");
                else
                    logger.info("Baseline auto-adjust will happen in '" + res.getRemainingTimeToBaselineAdjust() + "' ms");
            }
        }

        logger.info("");

        Map<String, VisorBaselineNode> baseline = res.getBaseline();

        Map<String, VisorBaselineNode> srvs = res.getServers();

        // if task runs on a node with VisorBaselineNode of old version (V1) we'll get order=null for all nodes.
        Function<VisorBaselineNode, String> extractFormattedAddrs = node -> {
            Stream<String> sortedByIpHosts = Optional.ofNullable(node)
                .map(n -> n.getAddrs())
                .orElse(Collections.emptyList())
                .stream()
                .sorted(Comparator
                    .comparing(resolvedAddr -> new VisorTaskUtils.SortableAddress(resolvedAddr.address())))
                .map(resolvedAddr -> {
                    if (!resolvedAddr.hostname().equals(resolvedAddr.address()))
                        return resolvedAddr.hostname() + "/" + resolvedAddr.address();
                    else
                        return resolvedAddr.address();
                });

            if (verbose) {
                String hosts = String.join(",", sortedByIpHosts.collect(Collectors.toList()));

                if (!hosts.isEmpty())
                    return ", Addresses=" + hosts;
                else
                    return "";
            }
            else
                return sortedByIpHosts.findFirst().map(ip -> ", Address=" + ip).orElse("");
        };

        String crdStr = srvs.values().stream()
            // check for not null
            .filter(node -> node.getOrder() != null)
            .min(Comparator.comparing(VisorBaselineNode::getOrder))
            // format
            .map(crd -> " (Coordinator: ConsistentId=" + crd.getConsistentId() + extractFormattedAddrs.apply(crd) +
                ", Order=" + crd.getOrder() + ")")
            .orElse("");

        // Second topology-version line deliberately repeats the version with coordinator info appended.
        logger.info("Current topology version: " + res.getTopologyVersion() + crdStr);
        logger.info("");

        if (F.isEmpty(baseline))
            logger.info("Baseline nodes not found.");
        else {
            logger.info("Baseline nodes:");

            for (VisorBaselineNode node : baseline.values()) {
                VisorBaselineNode srvNode = srvs.get(node.getConsistentId());

                String state = ", State=" + (srvNode != null ? "ONLINE" : "OFFLINE");

                String order = srvNode != null ? ", Order=" + srvNode.getOrder() : "";

                logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() +
                    extractFormattedAddrs.apply(srvNode) + state + order);
            }

            logger.info(DELIM);
            logger.info("Number of baseline nodes: " + baseline.size());

            logger.info("");

            List<VisorBaselineNode> others = new ArrayList<>();

            for (VisorBaselineNode node : srvs.values()) {
                if (!baseline.containsKey(node.getConsistentId()))
                    others.add(node);
            }

            if (F.isEmpty(others))
                logger.info("Other nodes not found.");
            else {
                logger.info("Other nodes:");

                for (VisorBaselineNode node : others)
                    logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + ", Order=" + node.getOrder());

                logger.info("Number of other nodes: " + others.size());
            }
        }
    }

    /** {@inheritDoc} */
    @Override public void parseArguments(CommandArgIterator argIter) {
        // No subcommand means plain "print baseline" (COLLECT).
        if (!argIter.hasNextSubArg()) {
            this.baselineArgs = new BaselineArguments.Builder(BaselineSubcommands.COLLECT).build();

            return;
        }

        BaselineSubcommands cmd = of(argIter.nextArg("Expected baseline action"));

        if (cmd == null)
            throw new IllegalArgumentException("Expected correct baseline action");

        BaselineArguments.Builder baselineArgs = new BaselineArguments.Builder(cmd);

        switch (cmd) {
            case ADD:
            case REMOVE:
            case SET:
                Set<String> ids = argIter.nextStringSet("list of consistent ids");

                if (F.isEmpty(ids))
                    throw new IllegalArgumentException("Empty list of consistent IDs");

                baselineArgs.withConsistentIds(new ArrayList<>(ids));

                break;

            case VERSION:
                baselineArgs.withTopVer(argIter.nextNonNegativeLongArg("topology version"));

                break;

            case AUTO_ADJUST:
                // Consume auto-adjust flags ([disable|enable] [timeout <ms>]) until the sub-args end.
                do {
                    AutoAdjustCommandArg autoAdjustArg = CommandArgUtils.of(
                        argIter.nextArg("Expected one of auto-adjust arguments"), AutoAdjustCommandArg.class
                    );

                    if (autoAdjustArg == null)
                        throw new IllegalArgumentException("Expected one of auto-adjust arguments");

                    if (autoAdjustArg == AutoAdjustCommandArg.ENABLE || autoAdjustArg == AutoAdjustCommandArg.DISABLE)
                        baselineArgs.withEnable(autoAdjustArg == AutoAdjustCommandArg.ENABLE);

                    if (autoAdjustArg == AutoAdjustCommandArg.TIMEOUT)
                        baselineArgs.withSoftBaselineTimeout(argIter.nextNonNegativeLongArg("soft timeout"));
                }
                while (argIter.hasNextSubArg());

                break;
        }

        this.baselineArgs = baselineArgs.build();
    }

    /** {@inheritDoc} */
    @Override public String name() {
        return BASELINE.toCommandName();
    }
}
3e11a3dbd5c0984446c4e6c8f127be894aae85a2
41,586
java
Java
Core/test/com/blockwithme/lessobjects/juint/TestListChild.java
skunkiferous/LessObjects
a31c4442f281eaa8c953688badf914857ed461c4
[ "Apache-2.0" ]
null
null
null
Core/test/com/blockwithme/lessobjects/juint/TestListChild.java
skunkiferous/LessObjects
a31c4442f281eaa8c953688badf914857ed461c4
[ "Apache-2.0" ]
null
null
null
Core/test/com/blockwithme/lessobjects/juint/TestListChild.java
skunkiferous/LessObjects
a31c4442f281eaa8c953688badf914857ed461c4
[ "Apache-2.0" ]
null
null
null
45.153094
94
0.493435
7,445
/******************************************************************************* * Copyright 2013 Sebastien Diot * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ // $codepro.audit.disable package com.blockwithme.lessobjects.juint; import static com.blockwithme.lessobjects.juint.Constants.COMPILERS; import static com.blockwithme.lessobjects.juint.Constants.DELTA; import static com.blockwithme.lessobjects.juint.Constants.FACTORY; import static org.junit.Assert.assertEquals; import java.util.Iterator; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import com.blockwithme.lessobjects.Compiler; import com.blockwithme.lessobjects.Field; import com.blockwithme.lessobjects.Struct; import com.blockwithme.lessobjects.beans.BooleanValueChange; import com.blockwithme.lessobjects.beans.ByteValueChange; import com.blockwithme.lessobjects.beans.CharValueChange; import com.blockwithme.lessobjects.beans.DoubleValueChange; import com.blockwithme.lessobjects.beans.FloatValueChange; import com.blockwithme.lessobjects.beans.IntValueChange; import com.blockwithme.lessobjects.beans.LongValueChange; import com.blockwithme.lessobjects.beans.ShortValueChange; import com.blockwithme.lessobjects.beans.ValueChange; import com.blockwithme.lessobjects.fields.object.ObjectField; import com.blockwithme.lessobjects.fields.primitive.BooleanField; import com.blockwithme.lessobjects.fields.primitive.ByteField; 
import com.blockwithme.lessobjects.fields.primitive.CharField; import com.blockwithme.lessobjects.fields.primitive.DoubleField; import com.blockwithme.lessobjects.fields.primitive.FloatField; import com.blockwithme.lessobjects.fields.primitive.IntField; import com.blockwithme.lessobjects.fields.primitive.LongField; import com.blockwithme.lessobjects.fields.primitive.ShortField; import com.blockwithme.lessobjects.storage.ActionSet; import com.blockwithme.lessobjects.storage.Storage; //CHECKSTYLE IGNORE FOR NEXT 400 LINES @SuppressWarnings({ "PMD", "all" }) public class TestListChild extends TestData { /** The factory. */ private CompiledStorage[] COMPILED; @BeforeClass public static void setUpClass() { } @Before public void setup() { Struct tmp1 = new Struct("ListStruct", new Struct[] {}, new Field<?, ?>[] { FACTORY.newIntField("intField"), FACTORY.newLongField("listLong"), FACTORY.newByteField("listByte"), FACTORY.newShortField("listShort"), FACTORY.newFloatField("listFloat"), FACTORY.newDoubleField("listDouble"), FACTORY.newCharField("listChar"), FACTORY.newBooleanField("listBoolean"), FACTORY.newIntField("listInt"), FACTORY.newStringField("listString") }); tmp1 = tmp1.setList(true); final Struct tmp = new Struct("BaseStruct", new Struct[] { tmp1 }, new Field<?, ?>[] { FACTORY.newIntField("intField"), FACTORY.newLongField("longField"), FACTORY.newByteField("byteField"), FACTORY.newShortField("shortField"), FACTORY.newFloatField("floatField"), FACTORY.newDoubleField("doubleField"), FACTORY.newCharField("charField"), FACTORY.newBooleanField("booleanField1"), FACTORY.newBooleanField("booleanField2"), FACTORY.newIntField("intField2"), FACTORY.newStringField("stringField1") }); int count = 0; COMPILED = new CompiledStorage[COMPILERS.length]; for (final Compiler cmplr : COMPILERS) { COMPILED[count] = new CompiledStorage(); COMPILED[count].compiledStructs = cmplr.compile(tmp); COMPILED[count].storage = cmplr.initStorage( COMPILED[count].compiledStructs, _CAPACITY); 
COMPILED[count].compiler = cmplr; count++; } } @Test public void testModifyBoolean() { final boolean[] booleans = booleans(); for (final CompiledStorage strg : COMPILED) { String message = "Boolean Field Test failed for Compiler -" + strg.compiler.compilerName(); final BooleanField bool1 = (BooleanField) strg.compiledStructs .field("booleanField1"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(bool1, booleans[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, booleans[i], strg.storage.read(bool1)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Boolean field -" + bool1.name(); for (int count = 0; count < _CAPACITY; count++) { if (booleans[count]) { final BooleanValueChange change = (BooleanValueChange) changes .next(); assertEquals(message, bool1, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", false, change.booleanOldValue()); assertEquals(message + " new value -", booleans[count], change.booleanNewValue()); assertEquals(message + " old value -", false, change.oldValue()); assertEquals(message + " new value -", booleans[count], change.newValue()); } } } } @Test public void testModifyByte() { final byte[] bytes = bytes(); for (final CompiledStorage strg : COMPILED) { String message = "Byte Field Test failed for Compiler -" + strg.compiler.compilerName(); final ByteField byteField = (ByteField) strg.compiledStructs .field("byteField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(byteField, bytes[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { 
strg.storage.selectStructure(i); assertEquals(message, bytes[i], strg.storage.read(byteField)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Byte field -" + byteField.name(); for (int count = 0; count < _CAPACITY; count++) { if (bytes[count] != 0) { final ByteValueChange change = (ByteValueChange) changes .next(); assertEquals(message, byteField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.byteOldValue()); assertEquals(message + " new value -", bytes[count], change.byteNewValue()); assertEquals(message + " old value -", Byte.valueOf((byte) 0), change.oldValue()); assertEquals(message + " new value -", Byte.valueOf(bytes[count]), change.newValue()); } } } } @Test public void testModifyChar() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { String message = "Boolean Field Test failed for Compiler -" + strg.compiler.compilerName(); final CharField charField = (CharField) strg.compiledStructs .field("charField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(charField, chars[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, chars[i], strg.storage.read(charField)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + 
strg.compiler.compilerName() + " Char field -" + charField.name(); for (int count = 0; count < _CAPACITY; count++) { if (chars[count] != 0) { final CharValueChange change = (CharValueChange) changes .next(); assertEquals(message, charField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.charOldValue()); assertEquals(message + " new value -", chars[count], change.charNewValue()); assertEquals(message + " old value -", Character.valueOf((char) 0), change.oldValue()); assertEquals(message + " new value -", Character.valueOf(chars[count]), change.newValue()); } } } } @Test public void testModifyDouble() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { String message = "Double Field Test failed for Compiler -" + strg.compiler.compilerName(); final DoubleField doubleField = (DoubleField) strg.compiledStructs .field("doubleField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(doubleField, doubles[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, doubles[i], strg.storage.read(doubleField), DELTA); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Double field -" + doubleField.name(); for (int count = 0; count < _CAPACITY; count++) { if (doubles[count] != 0) { final DoubleValueChange change = (DoubleValueChange) changes .next(); assertEquals(message, 
doubleField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.doubleOldValue(), DELTA); assertEquals(message + " new value -", doubles[count], change.doubleNewValue(), DELTA); assertEquals(message + " old value -", Double.valueOf(0), change.oldValue()); assertEquals(message + " new value -", Double.valueOf(doubles[count]), change.newValue()); } } } } @Test public void testModifyFloat() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { String message = "Float Field Test failed for Compiler -" + strg.compiler.compilerName(); final FloatField floatField = (FloatField) strg.compiledStructs .field("floatField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(floatField, floats[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, floats[i], strg.storage.read(floatField), DELTA); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Float field -" + floatField.name(); for (int count = 0; count < _CAPACITY; count++) { if (floats[count] != 0) { final FloatValueChange change = (FloatValueChange) changes .next(); assertEquals(message, floatField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.floatOldValue(), DELTA); assertEquals(message + " new value -", floats[count], 
change.floatNewValue(), DELTA); assertEquals(message + " old value -", Float.valueOf(0), change.oldValue()); assertEquals(message + " new value -", Float.valueOf(floats[count]), change.newValue()); } } } } @Test public void testModifyInt() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { String message = "Int Field Test failed for Compiler -" + strg.compiler.compilerName(); final IntField intField = (IntField) strg.compiledStructs .field("intField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(intField, ints[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, ints[i], strg.storage.read(intField)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Int field -" + intField.name(); for (int count = 0; count < _CAPACITY; count++) { if (ints[count] != 0) { final IntValueChange change = (IntValueChange) changes .next(); assertEquals(message, intField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.intOldValue()); assertEquals(message + " new value -", ints[count], change.intNewValue()); assertEquals(message + " old value -", Integer.valueOf(0), change.oldValue()); assertEquals(message + " new value -", Integer.valueOf(ints[count]), change.newValue()); } } } } @Test public void testModifyListBoolean() { final boolean[] booleans = booleans(); final 
byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final BooleanField bool = list.field("listBoolean"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(bool, booleans[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(booleans[j], storage.read(bool)); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (booleans[j]) { final BooleanValueChange change = (BooleanValueChange) changes .next(); assertEquals(bool, change.field()); assertEquals(j, change.structureIndex()); assertEquals(false, change.booleanOldValue()); assertEquals(booleans[j], change.booleanNewValue()); assertEquals(false, change.oldValue()); assertEquals(booleans[j], change.newValue()); } } } } } @Test public void testModifyListByte() { final byte[] bytes = bytes(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final ByteField fByte = list.field("listByte"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); 
storage.write(fByte, bytes[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(bytes[j], storage.read(fByte)); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (bytes[j] != 0) { final ByteValueChange change = (ByteValueChange) changes .next(); assertEquals(fByte, change.field()); assertEquals(j, change.structureIndex()); assertEquals((byte) 0, change.byteOldValue()); assertEquals(bytes[j], change.byteNewValue()); assertEquals((byte) 0, (byte) change.oldValue()); assertEquals(bytes[j], (byte) change.newValue()); } } } } } @Test public void testModifyListChar() { final char[] chars = chars(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final CharField fChar = list.field("listChar"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(fChar, chars[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(chars[j], storage.read(fChar)); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (chars[j] != 0) { final CharValueChange change = (CharValueChange) changes .next(); assertEquals(fChar, change.field()); 
assertEquals(j, change.structureIndex()); assertEquals((char) 0, change.charOldValue()); assertEquals(chars[j], change.charNewValue()); assertEquals((char) 0, (char) change.oldValue()); assertEquals(chars[j], (char) change.newValue()); } } } } } @Test public void testModifyListDouble() { final double[] doubles = doubles(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final DoubleField fDouble = list.field("listDouble"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(fDouble, doubles[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(doubles[j], storage.read(fDouble), DELTA); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (doubles[j] != 0) { final DoubleValueChange change = (DoubleValueChange) changes .next(); assertEquals(fDouble, change.field()); assertEquals(j, change.structureIndex()); assertEquals(0D, change.doubleOldValue(), DELTA); assertEquals(doubles[j], change.doubleNewValue(), DELTA); assertEquals(0D, change.oldValue(), DELTA); assertEquals(doubles[j], change.newValue(), DELTA); } } } } } @Test public void testModifyListFloat() { final float[] floats = floats(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final FloatField fFloat = list.field("listFloat"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); 
for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(fFloat, floats[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(floats[j], storage.read(fFloat), DELTA); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (floats[j] != 0) { final FloatValueChange change = (FloatValueChange) changes .next(); assertEquals(fFloat, change.field()); assertEquals(j, change.structureIndex()); assertEquals(0f, change.floatOldValue(), DELTA); assertEquals(floats[j], change.floatNewValue(), DELTA); assertEquals(0f, change.oldValue(), DELTA); assertEquals(floats[j], change.newValue(), DELTA); } } } } } @Test public void testModifyListInt() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final IntField fInt = list.field("listInt"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(fInt, ints[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { 
storage.selectStructure(j); assertEquals(ints[j], storage.read(fInt)); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (ints[j] != 0) { final IntValueChange change = (IntValueChange) changes .next(); assertEquals(fInt, change.field()); assertEquals(j, change.structureIndex()); assertEquals(0, change.intOldValue()); assertEquals(ints[j], change.intNewValue()); assertEquals(0, (int) change.oldValue()); assertEquals(ints[j], (int) change.newValue()); } } } } } @Test public void testModifyListLong() { final long[] longs = longs(); for (final CompiledStorage strg : COMPILED) { final Struct list = strg.compiledStructs.child("ListStruct"); final LongField fLong = list.field("listLong"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage .createOrClearList(list, 10); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); storage.write(fLong, longs[j]); } } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); final Storage storage = strg.storage.list(list); for (int j = 0; j < _CAPACITY; j++) { storage.selectStructure(j); assertEquals(longs[j], storage.read(fLong)); } } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); for (int count = 0; count < _CAPACITY; count++) { for (int j = 0; j < _CAPACITY; j++) { if (longs[j] != 0) { final LongValueChange change = (LongValueChange) changes .next(); assertEquals(fLong, change.field()); assertEquals(j, change.structureIndex()); assertEquals(0L, change.longOldValue()); assertEquals(longs[j], change.longNewValue()); assertEquals(0L, (long) change.oldValue()); assertEquals(longs[j], (long) change.newValue()); } } } } } @Test public void testModifyLong() { final long[] longs = longs(); 
for (final CompiledStorage strg : COMPILED) { String message = "Long Field Test failed for Compiler -" + strg.compiler.compilerName(); final LongField longField = (LongField) strg.compiledStructs .field("longField"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(longField, longs[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, longs[i], strg.storage.read(longField)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " Int field -" + longField.name(); for (int count = 0; count < _CAPACITY; count++) { if (longs[count] != 0) { final LongValueChange change = (LongValueChange) changes .next(); assertEquals(message, longField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.longOldValue()); assertEquals(message + " new value -", longs[count], change.longNewValue()); assertEquals(message + " old value -", Long.valueOf(0), change.oldValue()); assertEquals(message + " new value -", Long.valueOf(longs[count]), change.newValue()); } } } } @Test public void testModifyShort() { final boolean[] booleans = booleans(); final byte[] bytes = bytes(); final char[] chars = chars(); final double[] doubles = doubles(); final float[] floats = floats(); final long[] int_longs = int_longs(); final int[] ints = ints(); final long[] longs = longs(); final short[] shorts = shorts(); final String[] strings = strings(); for (final CompiledStorage strg : COMPILED) { String message = "Short Field Test failed for Compiler -" + strg.compiler.compilerName(); final ShortField shortField = (ShortField) strg.compiledStructs .field("shortField"); for (int i = 0; i < _CAPACITY; i++) { 
strg.storage.selectStructure(i); strg.storage.write(shortField, shorts[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, shorts[i], strg.storage.read(shortField)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " short field -" + shortField.name(); for (int count = 0; count < _CAPACITY; count++) { if (shorts[count] != 0) { final ShortValueChange change = (ShortValueChange) changes .next(); assertEquals(message, shortField, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", 0, change.shortOldValue()); assertEquals(message + " new value -", shorts[count], change.shortNewValue()); assertEquals(message + " old value -", Short.valueOf((short) 0), change.oldValue()); assertEquals(message + " new value -", Short.valueOf(shorts[count]), change.newValue()); } } } } @Test public void testModifyString() { final int[] ints = ints(); for (final CompiledStorage strg : COMPILED) { String message = "String Field Test failed for Compiler -" + strg.compiler.compilerName(); final ObjectField<String, ?> str1 = (ObjectField<String, ?>) strg.compiledStructs .field("stringField1"); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); strg.storage.write(str1, "test" + ints[i]); } final ActionSet actions = strg.storage.transactionManager() .commit(); for (int i = 0; i < _CAPACITY; i++) { strg.storage.selectStructure(i); assertEquals(message, "test" + ints[i], strg.storage.read(str1)); } final Iterator<ValueChange<?>> changes = actions.changeRecords() .changes(strg.storage.rootStruct()); message = "Failed Asserting 'change' list for Compiler -" + strg.compiler.compilerName() + " String field -" + str1.name(); for (int count = 0; 
count < _CAPACITY; count++) { final ValueChange change = changes.next(); assertEquals(message, str1, change.field()); assertEquals(message + " Struct index -", count, change.structureIndex()); assertEquals(message + " old value -", null, change.oldValue()); assertEquals(message + " new value -", "test" + ints[count], change.newValue()); } } } @Test public void testSchema() { for (final CompiledStorage strg : COMPILED) { final Struct compiledStructs = strg.compiledStructs; checkSchema(compiledStructs); } } }
3e11a3f0a7c379c2d38c83e036d930673bc342b2
265
java
Java
to-do/src/main/java/pl/sda/todo/service/IdGeneratorService.java
mwygledowski/Jav19ldz
b1fb4f587b6d6695722046b5d3a0fa88dd68c20c
[ "Apache-2.0" ]
null
null
null
to-do/src/main/java/pl/sda/todo/service/IdGeneratorService.java
mwygledowski/Jav19ldz
b1fb4f587b6d6695722046b5d3a0fa88dd68c20c
[ "Apache-2.0" ]
3
2021-12-10T01:22:40.000Z
2021-12-14T21:33:16.000Z
to-do/src/main/java/pl/sda/todo/service/IdGeneratorService.java
mwygledowski/Jav19ldz
b1fb4f587b6d6695722046b5d3a0fa88dd68c20c
[ "Apache-2.0" ]
null
null
null
20.384615
60
0.732075
7,446
package pl.sda.todo.service; import java.util.concurrent.atomic.AtomicLong; public class IdGeneratorService { private static AtomicLong currentId = new AtomicLong(0); public static long generateId() { return currentId.incrementAndGet(); } }
3e11a475aa2207c7bc48a965075995f85e8cca3f
1,246
java
Java
BCH/library/api/fermat-bch-api/src/main/java/com/bitdubai/fermat_bch_api/layer/crypto_network/bitcoin/interfaces/BitcoinNetworkConfiguration.java
guillermo20/fermat
f0a912adb66a439023ec4e70b821ba397e0f7760
[ "MIT" ]
3
2016-03-23T05:26:51.000Z
2016-03-24T14:33:05.000Z
BCH/library/api/fermat-bch-api/src/main/java/com/bitdubai/fermat_bch_api/layer/crypto_network/bitcoin/interfaces/BitcoinNetworkConfiguration.java
yalayn/fermat
f0a912adb66a439023ec4e70b821ba397e0f7760
[ "MIT" ]
17
2015-11-20T20:43:17.000Z
2016-07-25T20:35:49.000Z
BCH/library/api/fermat-bch-api/src/main/java/com/bitdubai/fermat_bch_api/layer/crypto_network/bitcoin/interfaces/BitcoinNetworkConfiguration.java
yalayn/fermat
f0a912adb66a439023ec4e70b821ba397e0f7760
[ "MIT" ]
26
2015-11-20T13:20:23.000Z
2022-03-11T07:50:06.000Z
31.2
91
0.724359
7,447
package com.bitdubai.fermat_bch_api.layer.crypto_network.bitcoin.interfaces; import org.bitcoinj.core.NetworkParameters; import org.bitcoinj.params.RegTestParams; import org.bitcoinj.params.TestNet3Params; /** * Created by rodrigo on 9/19/15. */ public interface BitcoinNetworkConfiguration { /** * Network that we are using as Default in the platform */ public static final NetworkParameters DEFAULT_NETWORK_PARAMETERS = RegTestParams.get(); /** * RegTest client configuration */ public static final String BITCOIN_FULL_NODE_1_IP = "172.16.31.10"; public static final int BITCOIN_FULL_NODE_1_PORT = 19020; public static final String BITCOIN_FULL_NODE_2_IP = "172.16.58.3"; public static final int BITCOIN_FULL_NODE_2_PORT = 19030; /** * Agent name and version */ public static final String USER_AGENT_NAME = "Fermat Agent"; public static final String USER_AGENT_VERSION ="2.1.0"; /** * amount of blocks depth to consider transaction IRReversible */ public static final int IRREVERSIBLE_BLOCK_DEPTH = 3; /** * Amount of Timeout minutes for broadcasting transactions */ public static final int TRANSACTION_BROADCAST_TIMEOUT = 5; }
3e11a4fafa213652c00217fe4f5726bc8f016fa7
665
java
Java
mogu_base/src/main/java/com/github/cccy0/mogublog/base/enums/EQiNiuArea.java
cccy0/cy_mogu_blog
8b33045339a2e0732ebbd683b8923a96db611c45
[ "Apache-2.0" ]
null
null
null
mogu_base/src/main/java/com/github/cccy0/mogublog/base/enums/EQiNiuArea.java
cccy0/cy_mogu_blog
8b33045339a2e0732ebbd683b8923a96db611c45
[ "Apache-2.0" ]
null
null
null
mogu_base/src/main/java/com/github/cccy0/mogublog/base/enums/EQiNiuArea.java
cccy0/cy_mogu_blog
8b33045339a2e0732ebbd683b8923a96db611c45
[ "Apache-2.0" ]
null
null
null
12.788462
45
0.44812
7,448
package com.github.cccy0.mogublog.base.enums; /** * 七牛云存储空间枚举类 * * @Author: 陌溪 * @Date: 2020年1月23日09:18:00 */ public enum EQiNiuArea { /** * 华东 */ z0("z0", "华东"), /** * 华北 */ z1("z1", "华北"), /** * 华南 */ z2("z2", "华南"), /** * 北美 */ na0("na0", "北美"), /** * 东南亚 */ as0("as0", "东南亚"); private final String code; private final String name; EQiNiuArea(String code, String name) { this.code = code; this.name = name; } public String getCode() { return code; } public String getName() { return name; } }
3e11a5a63f303fa5952ce2fcf1bb237b88af1aef
8,444
java
Java
emotesMain/src/main/java/io/github/kosmx/emotes/main/screen/widget/AbstractFastChooseWidget.java
RDKRACZ/emotes
4a087760b64df3a6a776290ef892827a25300e2d
[ "CC-BY-4.0" ]
63
2020-07-27T08:19:05.000Z
2022-03-20T11:57:57.000Z
emotesMain/src/main/java/io/github/kosmx/emotes/main/screen/widget/AbstractFastChooseWidget.java
Jerrynicki/emotes
6bd190b36c834a9ab9cc2b4e7e4df154453de581
[ "CC-BY-4.0" ]
72
2020-07-23T11:12:06.000Z
2022-03-25T08:41:34.000Z
emotesMain/src/main/java/io/github/kosmx/emotes/main/screen/widget/AbstractFastChooseWidget.java
Jerrynicki/emotes
6bd190b36c834a9ab9cc2b4e7e4df154453de581
[ "CC-BY-4.0" ]
25
2020-10-20T01:09:45.000Z
2022-02-25T17:16:03.000Z
37.696429
268
0.591663
7,449
package io.github.kosmx.emotes.main.screen.widget; import io.github.kosmx.emotes.common.tools.MathHelper; import io.github.kosmx.emotes.executor.EmoteInstance; import io.github.kosmx.emotes.executor.dataTypes.IIdentifier; import io.github.kosmx.emotes.executor.dataTypes.Text; import io.github.kosmx.emotes.main.EmoteHolder; import io.github.kosmx.emotes.main.config.ClientConfig; import javax.annotation.Nullable; import java.util.ArrayList; import java.util.UUID; import java.util.logging.Level; /** * Stuff fo override * void render(MATRIX, int mouseX, int mouseY, float delta) * boolean onMouseClicked * void isMouseHover * @param <MATRIX> Minecraft's MatrixStack */ public abstract class AbstractFastChooseWidget<MATRIX, WIDGET> implements IWidgetLogic<MATRIX, WIDGET> { public int x; public int y; protected int size; //protected final FastChooseElement[] elements = new FastChooseElement[8]; protected final ArrayList<FastChooseElement> elements = new ArrayList<>(); private boolean hovered; private final IIdentifier TEXTURE = ((ClientConfig) EmoteInstance.config).dark.get() ? 
EmoteInstance.instance.getDefaults().newIdentifier("textures/gui/fastchoose_dark.png") : EmoteInstance.instance.getDefaults().newIdentifier("textures/gui/fastchoose_light.png"); private AbstractFastChooseWidget(){ elements.add( new FastChooseElement(0, 22.5f)); elements.add( new FastChooseElement(1, 67.5f)); elements.add( new FastChooseElement(2, 157.5f)); elements.add( new FastChooseElement(3, 112.5f)); elements.add( new FastChooseElement(4, 337.5f)); elements.add( new FastChooseElement(5, 292.5f)); elements.add( new FastChooseElement(6, 202.5f)); elements.add( new FastChooseElement(7, 247.5f)); } public AbstractFastChooseWidget(int x, int y, int size){ this(); this.x = x; this.y = y; this.size = size; //It's a square with same width and height } public void drawCenteredText(MATRIX matrixStack, Text stringRenderable, float deg){ drawCenteredText(matrixStack, stringRenderable, (float) (((float) (this.x + this.size / 2)) + size * 0.4 * Math.sin(deg * 0.0174533)), (float) (((float) (this.y + this.size / 2)) + size * 0.4 * Math.cos(deg * 0.0174533))); } public void drawCenteredText(MATRIX matrices, Text stringRenderable, float x, float y){ int c = ((ClientConfig)EmoteInstance.config).dark.get() ? 
255 : 0; //:D textDraw(matrices, stringRenderable, x - (float) textRendererGetWidth(stringRenderable) / 2, y - 2, MathHelper.colorHelper(c, c, c, 1)); } @Nullable protected FastChooseElement getActivePart(int mouseX, int mouseY){ int x = mouseX - this.x - this.size / 2; int y = mouseY - this.y - this.size / 2; int i = 0; if(x == 0){ return null; }else if(x < 0){ i += 4; } if(y == 0){ return null; }else if(y < 0){ i += 2; } if(Math.abs(x) == Math.abs(y)){ return null; }else if(Math.abs(x) > Math.abs(y)){ i++; } return elements.get(i); } public void render(MATRIX matrices, int mouseX, int mouseY, float delta){ checkHovered(mouseX, mouseY); renderBindTexture(TEXTURE); renderSystemBlendColor(1, 1, 1, 1); renderEnableBend(); renderDefaultBendFunction(); renderEnableDepthText(); this.drawTexture(matrices, 0, 0, 0, 0, 2); if(this.hovered){ FastChooseElement part = getActivePart(mouseX, mouseY); if(part != null && doHoverPart(part)){ part.renderHover(matrices); } } for(FastChooseElement f : elements){ if(f.hasEmote()) f.render(matrices); } } protected abstract boolean doHoverPart(FastChooseElement part); /** * @param matrices MatrixStack ... * @param x Render x from this pixel * @param y same * @param u texture x * @param v texture y * @param s used texture part size !NOT THE WHOLE TEXTURE IMAGE SIZE! 
*/ private void drawTexture(MATRIX matrices, int x, int y, int u, int v, int s){ drawableDrawTexture(matrices, this.x + x * this.size / 256, this.y + y * this.size / 256, s * this.size / 2, s * this.size / 2, u, v, s * 128, s * 128, 512, 512); } private void checkHovered(int mouseX, int mouseY){ this.hovered = mouseX >= this.x && mouseY >= this.y && mouseX <= this.x + this.size && mouseY <= this.y + this.size; } public boolean emotes_mouseClicked(double mouseX, double mouseY, int button){ checkHovered((int) mouseX, (int) mouseY); if(this.hovered && this.isValidClickButton(button)){ FastChooseElement element = this.getActivePart((int) mouseX, (int) mouseY); if(element != null){ return EmotesOnClick(element, button); } } return false; } public boolean isMouseOver(double mouseX, double mouseY){ this.checkHovered((int) mouseX, (int) mouseY); return this.hovered; } protected abstract boolean isValidClickButton(int button); protected abstract boolean EmotesOnClick(FastChooseElement element, int button); //What DO I want to do with this element? set or play. protected abstract boolean doesShowInvalid(); protected class FastChooseElement { private final float angle; private final int id; @Nullable protected FastChooseElement(int num, float angle){ this.angle = angle; this.id = num; } public boolean hasEmote(){ return ((ClientConfig)EmoteInstance.config).fastMenuEmotes[id] != null; } public void setEmote(@Nullable EmoteHolder emote){ ((ClientConfig)EmoteInstance.config).fastMenuEmotes[id] = emote == null ? 
null : emote.getUuid(); } @Nullable public EmoteHolder getEmote(){ UUID uuid = ((ClientConfig)EmoteInstance.config).fastMenuEmotes[id]; if(uuid != null){ EmoteHolder emote = EmoteHolder.list.get(uuid); if(emote == null && doesShowInvalid()){ emote = new EmoteHolder.Empty(uuid); } return emote; } else { return null; } } public void clearEmote(){ this.setEmote(null); } public void render(MATRIX matrices){ UUID emoteID = ((ClientConfig)EmoteInstance.config).fastMenuEmotes[id] != null ? ((ClientConfig)EmoteInstance.config).fastMenuEmotes[id] : null; IIdentifier identifier = emoteID != null && EmoteHolder.list.get(emoteID) != null ? EmoteHolder.list.get(emoteID).getIconIdentifier() : null; if(identifier != null && ((ClientConfig)EmoteInstance.config).showIcons.get()){ int s = size / 10; int iconX = (int) (((float) (x + size / 2)) + size * 0.4 * Math.sin(this.angle * 0.0174533)) - s; int iconY = (int) (((float) (y + size / 2)) + size * 0.4 * Math.cos(this.angle * 0.0174533)) - s; renderBindTexture(identifier); drawableDrawTexture(matrices, iconX, iconY, s * 2, s * 2, 0, 0, 256, 256, 256, 256); }else{ if(((ClientConfig)EmoteInstance.config).fastMenuEmotes[id] != null){ drawCenteredText(matrices, EmoteHolder.getNonNull(((ClientConfig)EmoteInstance.config).fastMenuEmotes[id]).name, this.angle); }else{ EmoteInstance.instance.getLogger().log(Level.WARNING, "Tried to render non-existing name", true); } } } public void renderHover(MATRIX matrices){ int textX = 0; int textY = 0; int x = 0; int y = 0; if((id & 1) == 0){ textY = 256; }else{ textX = 256; } if((id & 2) == 0){ y = 128; } if((id & 4) == 0){ x = 128; } drawTexture(matrices, x, y, textX + x, textY + y, 1); } } }
3e11a5e6c604abb51c7bb1feb51c86451a876941
174
java
Java
src/main/java/uk/ac/ebi/ddi/service/db/model/dataset/DbDatasetCount.java
BD2K-DDI/ddi-service-db
6eb8670d1173e997e02c29ffe556f9b75ff1a188
[ "Apache-2.0" ]
null
null
null
src/main/java/uk/ac/ebi/ddi/service/db/model/dataset/DbDatasetCount.java
BD2K-DDI/ddi-service-db
6eb8670d1173e997e02c29ffe556f9b75ff1a188
[ "Apache-2.0" ]
4
2020-04-23T17:27:53.000Z
2021-07-06T11:39:13.000Z
src/main/java/uk/ac/ebi/ddi/service/db/model/dataset/DbDatasetCount.java
BD2K-DDI/ddi-service-db
6eb8670d1173e997e02c29ffe556f9b75ff1a188
[ "Apache-2.0" ]
1
2016-03-27T03:03:59.000Z
2016-03-27T03:03:59.000Z
14.5
47
0.683908
7,450
package uk.ac.ebi.ddi.service.db.model.dataset; /** * Created by gaur on 27/02/18. */ public class DbDatasetCount { public String _id; public Integer dbCount; }
3e11a804196fd6a4d4b9f52cf869b7ca075eabdb
12,002
java
Java
uengine-core/src/main/java/org/uengine/persistence/dao/MySQLDAOFactory.java
uengine-oss/uengine-bpm
39a3c8b4d3b30d4bb763421619130d87863bbeef
[ "Apache-2.0" ]
13
2015-07-15T01:01:37.000Z
2022-03-05T16:27:18.000Z
uengine-core/src/main/java/org/uengine/persistence/dao/MySQLDAOFactory.java
uengine/uengine-bpm
d1fa2f43c06295830e7fae9b7011ed2ee75b6d4c
[ "Apache-2.0" ]
22
2016-11-29T03:24:38.000Z
2018-03-13T01:09:34.000Z
uengine-core/src/main/java/org/uengine/persistence/dao/MySQLDAOFactory.java
uengine/uengine-bpm
d1fa2f43c06295830e7fae9b7011ed2ee75b6d4c
[ "Apache-2.0" ]
8
2016-05-14T22:51:44.000Z
2021-09-24T02:25:22.000Z
29.781638
481
0.627229
7,451
/* * Created on 2004. 12. 14. */ package org.uengine.persistence.dao; import org.uengine.util.dao.*; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.*; import java.lang.reflect.*; /** * @author Jinyoung Jang */ public class MySQLDAOFactory extends OracleDAOFactory{ static Hashtable currKeys = new Hashtable(); public WorkListDAO createWorkListDAOForInsertCall(Map options) throws Exception{ return (WorkListDAO)Proxy.newProxyInstance( WorkListDAO.class.getClassLoader(), new Class[]{WorkListDAO.class}, new ConnectiveDAO( getConnectionFactory(), true, "insert into bpm_worklist(taskid, title, description, endpoint, resname, status, priority, startdate, enddate, duedate, instid, rootinstid, defid, defname, trctag, tool, dispatchoption, parameter, rolename, refrolename, dispatchparam1) values(?taskid, ?title, ?description, ?endpoint, ?resname, ?status, ?priority, ?startdate, ?enddate, ?duedate, ?instid, ?rootinstid, ?defid, ?defname, ?trctag, ?tool, ?dispatchoption, ?parameter, ?rolename, ?refrolename, ?dispatchparam1)", WorkListDAO.class ){ public int call() throws Exception{ Number var_taskId = (Number)get("taskid"); if(var_taskId!=null){ WorkListDAO existingWorklist = (WorkListDAO)GenericDAO.createDAOImpl( getConnectionFactory(), "delete from bpm_worklist where taskid=?taskid", WorkListDAO.class ); existingWorklist.setTaskId(var_taskId); existingWorklist.update(); }else{ KeyGeneratorDAO kg = DAOFactory.getInstance(getConnectionFactory()).createKeyGenerator("worklist", null); kg.select(); kg.next(); Number taskId = kg.getKeyNumber(); set("taskid", taskId); } int cnt = super.insert(); return cnt; } } ); } public KeyGeneratorDAO createKeyGenerator(final String forWhat, final Map options) throws Exception { boolean option_useTableNameHeader = true; if(options!=null && options.containsKey("useTableNameHeader")){ option_useTableNameHeader = 
!"false".equals(options.get("useTableNameHeader")); } final boolean useTableNameHeader = option_useTableNameHeader; return new KeyGeneratorDAO(){ public Number getKeyNumber() { String forTableName = new String(forWhat); String forColumnName = new String(((useTableNameHeader) ? forWhat : "") + "id"); forColumnName = forColumnName.replaceFirst("Proc", ""); // forTableName = forTableName.toLowerCase(); forTableName = forTableName.toUpperCase(); if (forTableName.equals("WORKITEM")) { forColumnName = "TASKID"; } Connection conn = null; Statement stmt_select_seq = null; ResultSet rs_select_seq = null; Statement stmt_select_table_max_key = null; ResultSet rs_select_table_max_key = null; PreparedStatement pstmt_update_seq = null; PreparedStatement pstmt_insert_seq = null; try { conn = getConnectionFactory().getConnection(); if(conn == null) throw new RuntimeException("Database connection is null."); conn.setAutoCommit(false); Long seq_key = null; stmt_select_seq = conn.createStatement(); rs_select_seq = stmt_select_seq.executeQuery("select ifnull(max(seq),0) + 1 as lastkey from bpm_seq where tbname = '" + forTableName.toLowerCase() + "'"); if (rs_select_seq.next()) { seq_key = rs_select_seq.getLong("lastkey"); } else { seq_key = new Long(1); } Long id_key = null; stmt_select_table_max_key = conn.createStatement(); rs_select_table_max_key = stmt_select_table_max_key.executeQuery("select ifnull(max("+forColumnName.toLowerCase()+"),0) as lastkey from " +(forTableName.equals("WORKITEM")? "BPM_WORKLIST" : ((useTableNameHeader)?"BPM_":"") + forTableName).toLowerCase()); if (rs_select_table_max_key.next()) { id_key = rs_select_table_max_key.getLong("lastkey"); } Long key = null; if (seq_key.longValue() > id_key.longValue()) { key = seq_key; } else { key = new Long(id_key.longValue() + 1); } pstmt_update_seq = conn.prepareStatement("update bpm_seq set seq = ? 
, moddate = now() where tbname = ?"); pstmt_update_seq.setLong(1, key); pstmt_update_seq.setString(2, forTableName); int modcount = pstmt_update_seq.executeUpdate(); if(modcount == 0) { pstmt_insert_seq = conn.prepareStatement("insert into bpm_seq (tbname, seq, description, moddate) values(?, ?, ?, now())"); pstmt_insert_seq.setString(1, forTableName); pstmt_insert_seq.setLong(2, key); pstmt_insert_seq.setString(3, forTableName); pstmt_insert_seq.executeUpdate(); } conn.commit(); return key; } catch (Exception e1) { if(conn!=null) try { conn.rollback(); } catch (SQLException e) { e.printStackTrace(); } throw new RuntimeException(e1); } finally { if (stmt_select_seq != null) try { stmt_select_seq.close(); } catch (SQLException e1) {} if (rs_select_seq != null) try { rs_select_seq.close(); } catch (SQLException e1) {} if (stmt_select_table_max_key != null) try { stmt_select_table_max_key.close(); } catch (SQLException e1) {} if (rs_select_table_max_key != null) try { rs_select_table_max_key.close(); } catch (SQLException e1) {} if (pstmt_update_seq != null) try { pstmt_update_seq.close(); } catch (SQLException e1) {} if (pstmt_insert_seq != null) try { pstmt_insert_seq.close(); } catch (SQLException e1) {} // if (conn != null) try { conn.setAutoCommit(true); } catch (SQLException e1) {} // if (conn != null) try { conn.close(); } catch (SQLException e) {} } // DefaultTransactionContext tc = new SimpleTransactionContext(); // try { // // String forTableName = new String(forWhat); // String forColumnName = new String(((useTableNameHeader)?forWhat:"") + "id"); // forColumnName = forColumnName.replaceFirst("Proc",""); // forTableName = forTableName.toLowerCase(); // if(forTableName.equals("workitem")) forColumnName = "taskid"; // // //bpm_seq sequence getting/////////////////////////////////////////////////////////////////// // Long seq_key = null; // IDAO gdao = ConnectiveDAO.createDAOImpl( // tc, // "select ifnull(max(seq),0) + 1 as lastKey from bpm_seq where tbname = 
'" + forTableName + "'", // IDAO.class // ); // gdao.select(); // if(gdao.next()){ // Number currKey = (Number)gdao.get("lastKey"); // seq_key = new Long(currKey.longValue()); // } else { // seq_key = new Long(1); // } // // //table sequence getting///////////////////////////////////////////////////////////////////// // Long id_key = null; // IDAO tdao = ConnectiveDAO.createDAOImpl( // tc, // "select ifnull(max("+forColumnName+"),0) as lastKey from " +(forTableName.equals("workitem")? "bpm_worklist" : ((useTableNameHeader)?"bpm_":"") + forTableName) , // IDAO.class // ); // tdao.select(); // if(tdao.next()){ // Number currKey = (Number)tdao.get("lastKey"); // id_key = new Long(currKey.longValue()); // } // // //update key///////////////////////////////////////////////////////////////////////////////// // Long key = null; // if(seq_key.longValue() > id_key.longValue()) key = seq_key; // else key = new Long(id_key.longValue()+1); // // IDAO udao = ConnectiveDAO.createDAOImpl( // tc, // "update bpm_seq set seq = ?seq , moddate = now() where tbname = ?tbname", // IDAO.class // ); // // udao.set("seq", key); // udao.set("tbname", forTableName); // //udao.set("preSeq", new Long(key.longValue()-1)); // // //if seq dont't exist,seq insert //////////////////////////////////////////////////////////// // int modcount = udao.update(); // if(modcount == 0){ // IDAO idao = ConnectiveDAO.createDAOImpl( // tc, // "insert into bpm_seq (tbname, seq, description, moddate) values(?tbname, ?seq, ?description, now())", // IDAO.class // ); // idao.set("tbname", forTableName); // idao.set("seq", key); // idao.set("description", forTableName); // idao.insert(); // } // // return key; // // } catch (Exception e) { // // TODO Auto-generated catch block // //e.printStackTrace(); // // throw new RuntimeException(e); // } finally { // try { // tc.releaseResources(); // } catch (Exception e) { // e.printStackTrace(); // } // } } public void setKeyNumber(Number id) { // TODO Auto-generated method 
stub } public void select() throws Exception { // TODO Auto-generated method stub } public int insert() throws Exception { // TODO Auto-generated method stub return 0; } public int update() throws Exception { // TODO Auto-generated method stub return 0; } public int call() throws Exception { // TODO Auto-generated method stub return 0; } public void beforeFirst() throws Exception { // TODO Auto-generated method stub } public boolean previous() throws Exception { // TODO Auto-generated method stub return false; } public boolean next() throws Exception { // TODO Auto-generated method stub return false; } public boolean first() throws Exception { // TODO Auto-generated method stub return false; } public void afterLast() throws Exception { } public boolean last() throws Exception { return false; } public int size() { // TODO Auto-generated method stub return 0; } public Object get(String key) throws Exception { // TODO Auto-generated method stub return null; } public Object set(String key, Object value) throws Exception { // TODO Auto-generated method stub return null; } public int update(String stmt) throws Exception { // TODO Auto-generated method stub return 0; } public void addBatch() throws Exception { // TODO Auto-generated method stub } public int[] updateBatch() throws Exception { // TODO Auto-generated method stub return null; } public String getString(String key) throws Exception { // TODO Auto-generated method stub return null; } public Integer getInt(String key) throws Exception { // TODO Auto-generated method stub return null; } public Long getLong(String key) throws Exception { // TODO Auto-generated method stub return null; } public Boolean getBoolean(String key) throws Exception { // TODO Auto-generated method stub return null; } public Date getDate(String key) throws Exception { // TODO Auto-generated method stub return null; } public boolean absolute(int pos) throws Exception { // TODO Auto-generated method stub return false; } public 
AbstractGenericDAO getImplementationObject() { // TODO Auto-generated method stub return null; } public void releaseResource() throws Exception { // TODO Auto-generated method stub } }; } public String getSequenceSql(String seqName) throws Exception { // TODO Auto-generated method stub return ""; } public String getDBMSProductName() throws Exception { return "MySQL"; } public Calendar getNow() throws Exception { IDAO nowQuery = (IDAO)create(IDAO.class, "select now() as now"); //SELECT DATE_FORMAT(now(), '%Y-%m-%d') nowQuery.select(); if(nowQuery.next()){ Calendar now = Calendar.getInstance(); now.setTime((Date)nowQuery.get("now")); return now; }else{ throw new Exception("Can't get current system date from DB."); } } }
3e11a823405e37eaedb844f70b35a5529c1798a4
2,724
java
Java
src/main/java/ashes/quill/NodeSystem/NodeSaveLoad.java
Quillewd/AshesCraft
8ef44c056f31352af27e8c3b23a00c3f2a092f16
[ "MIT" ]
null
null
null
src/main/java/ashes/quill/NodeSystem/NodeSaveLoad.java
Quillewd/AshesCraft
8ef44c056f31352af27e8c3b23a00c3f2a092f16
[ "MIT" ]
1
2020-09-09T18:56:31.000Z
2020-09-09T18:56:31.000Z
src/main/java/ashes/quill/NodeSystem/NodeSaveLoad.java
Quillewd/AshesCraft
8ef44c056f31352af27e8c3b23a00c3f2a092f16
[ "MIT" ]
null
null
null
31.310345
139
0.613803
7,452
package ashes.quill.NodeSystem; import ashes.quill.Config.Constants; import ashes.quill.Data.DataManager; import ashes.quill.Utils.Coordinates2d; import org.json.JSONObject; import java.io.File; public class NodeSaveLoad { //Create instance of data manager protected DataManager dataManager = new DataManager(); /** * Save data about ashes players to a local file * @param node the node to save */ public void saveNode(Node node) { saveLoadLog("Attempting to save node " + node.getName() + " at " + node.getCoordinateString()); //Create JSON Object for the player JSONObject nodeObject = new JSONObject(); nodeObject.put("x", node.getX()); nodeObject.put("z", node.getZ()); nodeObject.put("name", node.getName()); nodeObject.put("level", node.getLevel()); nodeObject.put("experience", node.getExperience()); File path = new File(Constants.nodePath, node.getX() + ", " + node.getZ() + ".json"); dataManager.writeJSONSafe(path, nodeObject); } /** * Load the player from their respective JSON file * @param node to load from file */ public void loadNode(Node node){ NodeManager nodeManager = NodeManager.getInstance(); //Create file File path = new File(Constants.nodePath, node.getX() + ", " + node.getZ() + ".json"); //check if the file exists if(!path.exists()){ //If the file doesn't exist, say that it doesn't saveLoadLog("File for node " + node.getCoordinateString() + " does not exist."); //Register the player nodeManager.createNode(node); return; } //Get json object from file JSONObject nodeObject = dataManager.readJSON(path); //Get the node information from its save file String name = nodeObject.getString("name"); int x = nodeObject.getInt("x"); int z = nodeObject.getInt("z"); int level = nodeObject.getInt("level"); int experience = nodeObject.getInt("experience"); //Create a node based on the loaded data Node loadedNode = new Node(new Coordinates2d(x, z), name, level, experience); //Register the node if(!nodeManager.nodeExists(loadedNode)){ //add the node if it doesn't exist 
nodeManager.addNode(loadedNode); //Log the players info saveLoadLog("Loaded node " + name + "{ level:" + level + ", experience:" + experience + " } at " + node.getCoordinateString()); } } private void saveLoadLog(String message){ System.out.println("[Ashes] [NodeLoader] " + message); } }
3e11a8e5c8eec90be115c8333d9d91b9fcfa967a
6,791
java
Java
RISE-V2G-Shared/src/main/java/com/v2gclarity/risev2g/shared/misc/V2GCommunicationSession.java
mrbig/RISE-V2G
79917ab517ef9b60c094efd3fc12cc38cf6462d3
[ "MIT" ]
102
2017-07-24T07:41:03.000Z
2021-05-17T19:38:12.000Z
RISE-V2G-Shared/src/main/java/com/v2gclarity/risev2g/shared/misc/V2GCommunicationSession.java
mrbig/RISE-V2G
79917ab517ef9b60c094efd3fc12cc38cf6462d3
[ "MIT" ]
58
2017-07-24T07:43:01.000Z
2021-05-25T08:54:14.000Z
RISE-V2G-Shared/src/main/java/com/v2gclarity/risev2g/shared/misc/V2GCommunicationSession.java
mrbig/RISE-V2G
79917ab517ef9b60c094efd3fc12cc38cf6462d3
[ "MIT" ]
50
2017-07-24T08:34:19.000Z
2021-05-02T15:21:21.000Z
32.338095
123
0.74584
7,453
/******************************************************************************* * The MIT License (MIT) * * Copyright (c) 2015 - 2019 Dr. Marc Mültin (V2G Clarity) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*******************************************************************************/ package com.v2gclarity.risev2g.shared.misc; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Observable; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import com.v2gclarity.risev2g.shared.enumerations.V2GMessages; import com.v2gclarity.risev2g.shared.messageHandling.MessageHandler; import com.v2gclarity.risev2g.shared.messageHandling.PauseSession; import com.v2gclarity.risev2g.shared.messageHandling.TerminateSession; import com.v2gclarity.risev2g.shared.utils.ByteUtils; import com.v2gclarity.risev2g.shared.utils.MiscUtils; import com.v2gclarity.risev2g.shared.utils.SecurityUtils; import com.v2gclarity.risev2g.shared.v2gMessages.msgDef.EnergyTransferModeType; import com.v2gclarity.risev2g.shared.v2gMessages.msgDef.PaymentOptionListType; import com.v2gclarity.risev2g.shared.v2gMessages.msgDef.PaymentOptionType; import com.v2gclarity.risev2g.shared.v2gMessages.msgDef.V2GMessage; public abstract class V2GCommunicationSession extends Observable { private Logger logger = LogManager.getLogger(this.getClass().getSimpleName()); private HashMap<V2GMessages, State> states; private State currentState; private State startState; private MessageHandler messageHandler; private byte[] sessionID; private V2GTPMessage v2gTpMessage; private V2GMessage v2gMessage; private boolean tlsConnection; public V2GCommunicationSession() { setStates(new HashMap<V2GMessages, State>()); setMessageHandler(MessageHandler.getInstance()); setSessionID(null); setV2gTpMessage(null); } /** * Generates randomly a new session ID (with length of 8 bytes) and takes care that the newly generated * session ID does not match the store previous session ID and that it is unequal to 0. 
* @return The byte array representation of the provided session ID */ public byte[] generateSessionIDRandomly() { byte[] sessionID = new byte[8]; while (sessionID == null || ByteUtils.toLongFromByteArray(sessionID) == 0L || Arrays.equals(sessionID, getSessionID())) { sessionID = SecurityUtils.generateRandomNumber(8); } return sessionID; } protected void pauseSession(PauseSession pauseObject) { getLogger().info("Pausing V2G communication session"); setChanged(); notifyObservers(pauseObject); } protected void terminateSession(TerminateSession termination) { String terminationPrefix = "Terminating V2G communication session, reason: "; if (termination.isSuccessfulTermination()) { getLogger().info(terminationPrefix + termination.getReasonForSessionStop()); } else { getLogger().warn(terminationPrefix + termination.getReasonForSessionStop()); } setChanged(); notifyObservers(termination); } /** * Should be used if no TerminateSession instance has been provided by the respective state * but some other case causes a session termination * * @param reason The termination cause * @param successful True, in case of a successful session termination, false otherwise */ protected void terminateSession(String reason, boolean successful) { String terminationPrefix = "Terminating V2G communication session, reason: "; TerminateSession termination = new TerminateSession(reason, successful); if (successful) getLogger().debug(terminationPrefix + reason); else getLogger().error(terminationPrefix + reason); setChanged(); notifyObservers(termination); } public ArrayList<EnergyTransferModeType> getSupportedEnergyTransferModes() { @SuppressWarnings("unchecked") ArrayList<EnergyTransferModeType> energyTransferModes = (MiscUtils.getPropertyValue("energy.transfermodes.supported") != null) ? 
((ArrayList<EnergyTransferModeType>) MiscUtils.getPropertyValue("energy.transfermodes.supported")) : new ArrayList<EnergyTransferModeType>(); return energyTransferModes; } public Logger getLogger() { return logger; } public HashMap<V2GMessages, State> getStates() { return states; } public void setStates(HashMap<V2GMessages, State> states) { this.states = states; } public State getCurrentState() { return currentState; } public void setCurrentState(State newState) { this.currentState = newState; if (newState == null) { getLogger().error("New state is not provided (null)"); } else { getLogger().debug("New state is " + this.currentState.getClass().getSimpleName()); } } public State getStartState() { return startState; } public void setStartState(State startState) { this.startState = startState; } public MessageHandler getMessageHandler() { return messageHandler; } public byte[] getSessionID() { return sessionID; } public void setSessionID(byte[] sessionID) { if (sessionID == null) { sessionID = ByteUtils.toByteArrayFromHexString("00"); } this.sessionID = sessionID; } public V2GTPMessage getV2gTpMessage() { return v2gTpMessage; } public void setV2gTpMessage(V2GTPMessage v2gTpMessage) { this.v2gTpMessage = v2gTpMessage; } public void setMessageHandler(MessageHandler messageHandler) { this.messageHandler = messageHandler; } public V2GMessage getV2gMessage() { return v2gMessage; } public void setV2gMessage(V2GMessage v2gMessage) { this.v2gMessage = v2gMessage; } public boolean isTlsConnection() { return tlsConnection; } public void setTlsConnection(boolean tlsConnection) { this.tlsConnection = tlsConnection; } }
3e11a9af46d5649f493158a2a4a3f5f761439e8f
372
java
Java
budgeteer-rest/src/main/java/de/adesso/budgeteer/rest/security/authorization/aspects/annotations/HasAccessToProject.java
adessoSE/budgeteer
54284931ee0779246972eac3b41ec0ccf6a20397
[ "MIT" ]
null
null
null
budgeteer-rest/src/main/java/de/adesso/budgeteer/rest/security/authorization/aspects/annotations/HasAccessToProject.java
adessoSE/budgeteer
54284931ee0779246972eac3b41ec0ccf6a20397
[ "MIT" ]
5
2022-02-16T10:18:12.000Z
2022-02-22T10:48:44.000Z
budgeteer-rest/src/main/java/de/adesso/budgeteer/rest/security/authorization/aspects/annotations/HasAccessToProject.java
adessoSE/budgeteer
54284931ee0779246972eac3b41ec0ccf6a20397
[ "MIT" ]
null
null
null
33.818182
76
0.836022
7,454
package de.adesso.budgeteer.rest.security.authorization.aspects.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @Retention(RetentionPolicy.RUNTIME) @Target(value = {ElementType.METHOD, ElementType.TYPE}) public @interface HasAccessToProject {}
3e11aac612b0452c5c0440b7ec477d4cef19faa0
3,192
java
Java
src/main/java/com/emc/ecs/management/sdk/actions/iam/IAMPolicyAction.java
codedellemc/ecs-cf-service-broker
8a9be9a0975ad817ba6939c7973389198ee74d20
[ "Apache-2.0" ]
7
2016-12-12T14:34:50.000Z
2017-11-10T19:49:43.000Z
src/main/java/com/emc/ecs/management/sdk/actions/iam/IAMPolicyAction.java
codedellemc/ecs-cf-service-broker
8a9be9a0975ad817ba6939c7973389198ee74d20
[ "Apache-2.0" ]
29
2017-02-19T14:17:22.000Z
2018-01-29T15:30:47.000Z
src/main/java/com/emc/ecs/management/sdk/actions/iam/IAMPolicyAction.java
codedellemc/ecs-cf-service-broker
8a9be9a0975ad817ba6939c7973389198ee74d20
[ "Apache-2.0" ]
9
2017-02-18T21:46:22.000Z
2017-08-25T20:13:46.000Z
44.957746
168
0.702068
7,455
package com.emc.ecs.management.sdk.actions.iam; import com.emc.ecs.management.sdk.ManagementAPIConnection; import com.emc.ecs.management.sdk.model.iam.policy.CreatePolicyResponse; import com.emc.ecs.management.sdk.model.iam.policy.GetPolicyResponse; import com.emc.ecs.management.sdk.model.iam.policy.IamPolicy; import com.emc.ecs.servicebroker.exception.EcsManagementClientException; import com.emc.ecs.servicebroker.exception.EcsManagementResourceNotFoundException; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriBuilder; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.nio.charset.Charset; import java.util.logging.Level; import java.util.logging.Logger; import static com.emc.ecs.management.sdk.ManagementAPIConstants.IAM; import static com.emc.ecs.management.sdk.actions.iam.IAMActionUtils.accountHeader; import static javax.ws.rs.HttpMethod.POST; public class IAMPolicyAction { public static IamPolicy create(ManagementAPIConnection connection, String policyName, String policyDocument, String accountId) throws EcsManagementClientException { String encodedDocument = null; try { encodedDocument = URLEncoder.encode(policyDocument, "UTF-8").replaceAll("\\+", "%20"); } catch (UnsupportedEncodingException e) { e.printStackTrace(); } UriBuilder uri = connection.uriBuilder() .segment(IAM) //.segment("local") .queryParam("Action", "CreatePolicy") .queryParam("PolicyDocument", encodedDocument) .queryParam("PolicyName", policyName); Response response = IAMActionUtils.remoteCall(connection, POST, uri, null, accountHeader(accountId)); CreatePolicyResponse ret = response.readEntity(CreatePolicyResponse.class); return ret.getCreatePolicyResult().getPolicy(); } public static IamPolicy get(ManagementAPIConnection connection, String policyARN, String accountId) throws EcsManagementClientException { UriBuilder uri = connection.uriBuilder() .segment(IAM) //.segment("local") .queryParam("Action", "GetPolicy") .queryParam("PolicyArn", 
policyARN); try { Response response = IAMActionUtils.remoteCall(connection, POST, uri, null, accountHeader(accountId)); GetPolicyResponse ret = response.readEntity(GetPolicyResponse.class); return ret.getGetPolicyResult().getPolicy(); } catch (EcsManagementResourceNotFoundException e) { Logger.getAnonymousLogger().log(Level.FINE, "IAM policy not found: " + policyARN, e); return null; } } public static void delete(ManagementAPIConnection connection, String policyARN, String accountId) throws EcsManagementClientException { UriBuilder uri = connection.uriBuilder() .segment(IAM) //.segment("local") .queryParam("Action", "DeletePolicy") .queryParam("PolicyArn", policyARN); IAMActionUtils.remoteCall(connection, POST, uri, null, accountHeader(accountId)); } }
3e11ab791c0405297bcb5734bf143ded187d585a
330
java
Java
src/main/java/com/sowell/tools/exception/TagConstructException.java
cosmicparticle/cpf-tools
32524615f35ff19798ce2d2200ef0c294bedcfb5
[ "Apache-2.0" ]
null
null
null
src/main/java/com/sowell/tools/exception/TagConstructException.java
cosmicparticle/cpf-tools
32524615f35ff19798ce2d2200ef0c294bedcfb5
[ "Apache-2.0" ]
3
2021-02-03T19:38:07.000Z
2021-08-02T17:06:47.000Z
src/main/java/com/sowell/tools/exception/TagConstructException.java
cosmicparticle/cpf-tools
32524615f35ff19798ce2d2200ef0c294bedcfb5
[ "Apache-2.0" ]
null
null
null
22
63
0.693939
7,456
package com.sowell.tools.exception; import org.dom4j.Element; public class TagConstructException extends ConstructException { /** *serialVersionUID */ private static final long serialVersionUID = 1L; public TagConstructException(String log, Element e) { super(log + (e == null ? "" : ("[" + e.getPath() + "]"))); } }
3e11ac00bb13b1ea6c32ba698437ad9ebe2f5178
1,971
java
Java
Chapter10/mapper/SFM/jOOQSpringBootSFMManyToManySQMMySQL/src/main/java/com/classicmodels/pojo/SimpleManager.java
mcac0006/Up-and-Running-with-jOOQ
41b6bea8df5e1a07e78a68e0d4b6337132c909c6
[ "MIT" ]
28
2020-09-18T14:32:24.000Z
2022-03-22T14:48:19.000Z
Chapter10/mapper/SFM/jOOQSpringBootSFMManyToManySQMMySQL/src/main/java/com/classicmodels/pojo/SimpleManager.java
mcac0006/Up-and-Running-with-jOOQ
41b6bea8df5e1a07e78a68e0d4b6337132c909c6
[ "MIT" ]
2
2021-03-22T12:20:16.000Z
2022-01-03T12:54:35.000Z
Chapter10/mapper/SFM/jOOQSpringBootSFMManyToManySQMMySQL/src/main/java/com/classicmodels/pojo/SimpleManager.java
mcac0006/Up-and-Running-with-jOOQ
41b6bea8df5e1a07e78a68e0d4b6337132c909c6
[ "MIT" ]
10
2021-03-20T13:30:37.000Z
2022-03-25T04:23:12.000Z
22.397727
67
0.607306
7,457
package com.classicmodels.pojo; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; import java.io.Serializable; import java.util.List; import java.util.Objects; import org.simpleflatmapper.map.annotation.Key; public class SimpleManager implements Serializable { private static final long serialVersionUID = 1; @Key private Long managerId; private String managerName; @JsonInclude(Include.NON_EMPTY) private List<SimpleOffice> offices; public Long getManagerId() { return managerId; } public void setManagerId(Long managerId) { this.managerId = managerId; } public String getManagerName() { return managerName; } public void setManagerName(String managerName) { this.managerName = managerName; } public List<SimpleOffice> getOffices() { return offices; } public void setOffices(List<SimpleOffice> offices) { this.offices = offices; } @Override public int hashCode() { int hash = 3; hash = 83 * hash + Objects.hashCode(this.managerId); hash = 83 * hash + Objects.hashCode(this.managerName); return hash; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final SimpleManager other = (SimpleManager) obj; if (!Objects.equals(this.managerName, other.managerName)) { return false; } if (!Objects.equals(this.managerId, other.managerId)) { return false; } return true; } @Override public String toString() { return "Manager{" + "managerId=" + managerId + ", managerName=" + managerName + '}'; } }
3e11ac0ee25afdf17d3dba79cd03cab316b13b58
1,796
java
Java
junior_001/src/test/java/ru/job4j/services/SimpleArrayTest.java
nvladislavn/job4j
e0245dc749a15348d20ace229547d176e309772a
[ "Apache-2.0" ]
null
null
null
junior_001/src/test/java/ru/job4j/services/SimpleArrayTest.java
nvladislavn/job4j
e0245dc749a15348d20ace229547d176e309772a
[ "Apache-2.0" ]
2
2021-12-10T01:22:34.000Z
2021-12-14T20:41:59.000Z
junior_001/src/test/java/ru/job4j/services/SimpleArrayTest.java
nvladislavn/job4j
e0245dc749a15348d20ace229547d176e309772a
[ "Apache-2.0" ]
null
null
null
23.946667
45
0.54343
7,458
package ru.job4j.services; import org.junit.Before; import org.junit.Test; import java.util.Iterator; import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; public class SimpleArrayTest { private SimpleArray<Integer> sa; Iterator<Integer> it; @Before public void createSimpleArray() { sa = new SimpleArray<>(10); sa.add(1); sa.add(2); sa.add(3); sa.add(4); sa.add(5); it = sa.iterator(); } @Test public void when5TimesDoNextThen12345() { assertThat(it.next(), is(1)); assertThat(it.next(), is(2)); assertThat(it.next(), is(3)); assertThat(it.next(), is(4)); assertThat(it.next(), is(5)); } @Test public void shouldReturnsFalse() { it.next(); it.next(); it.next(); it.next(); it.next(); assertThat(it.hasNext(), is(false)); } @Test public void theCallSequence() { assertThat(it.hasNext(), is(true)); assertThat(it.next(), is(1)); assertThat(it.hasNext(), is(true)); assertThat(it.next(), is(2)); assertThat(it.hasNext(), is(true)); assertThat(it.next(), is(3)); assertThat(it.hasNext(), is(true)); assertThat(it.next(), is(4)); assertThat(it.hasNext(), is(true)); assertThat(it.next(), is(5)); assertThat(it.hasNext(), is(false)); } @Test public void threeShouldBeNine() { sa.set(2, 9); assertThat(sa.get(2), is(9)); } @Test public void shouldReturns1245Null() { sa.remove(2); assertThat(it.next(), is(1)); assertThat(it.next(), is(2)); assertThat(it.next(), is(4)); assertThat(it.next(), is(5)); } }
3e11ac2900a2f15130ea9e1bc72451986108984d
711
java
Java
src/com/cg/java/demo/Lab1Q8.java
vivektanpure0/com-cg-java-labbook
11ac0a2f969febb08b4e8c624afced5b47e8b352
[ "MIT" ]
null
null
null
src/com/cg/java/demo/Lab1Q8.java
vivektanpure0/com-cg-java-labbook
11ac0a2f969febb08b4e8c624afced5b47e8b352
[ "MIT" ]
null
null
null
src/com/cg/java/demo/Lab1Q8.java
vivektanpure0/com-cg-java-labbook
11ac0a2f969febb08b4e8c624afced5b47e8b352
[ "MIT" ]
null
null
null
19.75
59
0.451477
7,459
package com.cg.java.demo; import java.util.Scanner; public class Lab1Q8 { static boolean checkNumber(int n) { while(n % 2==0) {//number even or not n=n/2;//4,2,1 } if(n==1) { return true; } else { return false; } } public static void main(String[] args) { Scanner sc = new Scanner(System.in); System.out.println("Enter the number "); int n = sc.nextInt(); if (checkNumber(n)) System.out.println(n + " is a power of 2"); else System.out.println(n + " isnot a power of 2"); } }
3e11ac36df06bbc8ec1d02d33217c177659786de
4,413
java
Java
certgen/certProcessor/src/main/java/org/incredible/pojos/ob/Assertion.java
Raju/cert-service
7affe88d76d938d65982a20040a690bfb8d39588
[ "MIT" ]
null
null
null
certgen/certProcessor/src/main/java/org/incredible/pojos/ob/Assertion.java
Raju/cert-service
7affe88d76d938d65982a20040a690bfb8d39588
[ "MIT" ]
63
2019-09-23T06:58:38.000Z
2022-03-02T08:04:38.000Z
certgen/certProcessor/src/main/java/org/incredible/pojos/ob/Assertion.java
Raju/cert-service
7affe88d76d938d65982a20040a690bfb8d39588
[ "MIT" ]
22
2019-07-29T09:45:35.000Z
2022-02-01T13:29:51.000Z
24.381215
89
0.645139
7,460
package org.incredible.pojos.ob; import org.apache.commons.lang3.StringUtils; import org.incredible.pojos.CompositeIdentityObject; import org.incredible.pojos.ob.exeptions.InvalidDateFormatException; import org.incredible.pojos.ob.valuator.ExpiryDateValuator; import org.incredible.pojos.ob.valuator.IssuedDateValuator; /** * Exactly per OpenBadges v2 specification */ public class Assertion extends OBBase { /** * HTTP URL or UUID from urn:uuid namespace */ private String id; /** * Simple string "Assertion" or URLs or IRIs of current context */ private String[] type; /** * DateTime string compatible with ISO 8601 guideline * For example, 2016-12-31T23:59:59+00:00 */ private String issuedOn; private CompositeIdentityObject recipient; private BadgeClass badge; /** * IRI or document representing an image representing this user’s achievement. * This must be a PNG or SVG image. Otherwise, use BadgeClass member. */ private String image; private Evidence evidence; /** * DateTime string compatible with ISO 8601 guideline * For example, 2016-12-31T23:59:59+00:00 */ private String expires; private VerificationObject verification; /** * A narrative that connects multiple pieces of evidence */ private String narrative; // Part of v2 spec, not referred in inCredible /** * Defaults to false if Assertion is not referenced from a * revokedAssertions list and may be omitted. */ private boolean revoked = false; /** * Optional published reason for revocation, if revoked. 
*/ private String revocationReason; public Assertion() { } public Assertion(String ctx) { setContext(ctx); } public String getId() { return id; } public void setId(String id) { this.id = id; } public String[] getType() { return type; } public void setType(String[] type) { this.type = type; } public String getIssuedOn() { return issuedOn; } public void setIssuedOn(String issuedOn) throws InvalidDateFormatException { IssuedDateValuator issuedDateValuator = new IssuedDateValuator(); if (issuedDateValuator.evaluates(issuedOn) == null) { throw new InvalidDateFormatException("Issued date is not in a given format"); } else { this.issuedOn = issuedDateValuator.evaluates(issuedOn); } } public CompositeIdentityObject getRecipient() { return recipient; } public void setRecipient(CompositeIdentityObject recipient) { this.recipient = recipient; } public BadgeClass getBadge() { return badge; } public void setBadge(BadgeClass badge) { this.badge = badge; } public String getImage() { return image; } public void setImage(String image) { this.image = image; } public Evidence getEvidence() { return evidence; } public void setEvidence(Evidence evidence) { this.evidence = evidence; } public String getExpires() { return expires; } public void setExpires(String expires) throws InvalidDateFormatException { if (StringUtils.isNotBlank(expires)) { ExpiryDateValuator valuator = new ExpiryDateValuator(this.getIssuedOn()); if (valuator.evaluates(expires) == null) { throw new InvalidDateFormatException("Expiry date is in wrong format"); } else { this.expires = valuator.evaluates(expires); } } } public VerificationObject getVerification() { return verification; } public void setVerification(VerificationObject verification) { this.verification = verification; } public String getNarrative() { return narrative; } public void setNarrative(String narrative) { this.narrative = narrative; } public boolean isRevoked() { return revoked; } public void setRevoked(boolean revoked) { this.revoked = revoked; } 
public String getRevocationReason() { return revocationReason; } public void setRevocationReason(String revocationReason) { this.revocationReason = revocationReason; } }
3e11ace132c960b3c1c3cf9651db5e6074afaf9d
4,311
java
Java
src/main/java/com/twilio/rest/taskrouter/v1/workspace/WorkerUpdater.java
FMV1491/twilio-java
4730901cc6cde9721407a2cab396dbca031f4fc3
[ "MIT" ]
null
null
null
src/main/java/com/twilio/rest/taskrouter/v1/workspace/WorkerUpdater.java
FMV1491/twilio-java
4730901cc6cde9721407a2cab396dbca031f4fc3
[ "MIT" ]
null
null
null
src/main/java/com/twilio/rest/taskrouter/v1/workspace/WorkerUpdater.java
FMV1491/twilio-java
4730901cc6cde9721407a2cab396dbca031f4fc3
[ "MIT" ]
null
null
null
29.731034
113
0.628856
7,461
/** * This code was generated by * \ / _ _ _| _ _ * | (_)\/(_)(_|\/| |(/_ v1.0.0 * / / */ package com.twilio.rest.taskrouter.v1.workspace; import com.twilio.base.Updater; import com.twilio.exception.ApiConnectionException; import com.twilio.exception.ApiException; import com.twilio.exception.RestException; import com.twilio.http.HttpMethod; import com.twilio.http.Request; import com.twilio.http.Response; import com.twilio.http.TwilioRestClient; import com.twilio.rest.Domains; public class WorkerUpdater extends Updater<Worker> { private final String pathWorkspaceSid; private final String pathSid; private String activitySid; private String attributes; private String friendlyName; private Boolean rejectPendingReservations; /** * Construct a new WorkerUpdater. * * @param pathWorkspaceSid The workspace_sid * @param pathSid The sid */ public WorkerUpdater(final String pathWorkspaceSid, final String pathSid) { this.pathWorkspaceSid = pathWorkspaceSid; this.pathSid = pathSid; } /** * The activity_sid. * * @param activitySid The activity_sid * @return this */ public WorkerUpdater setActivitySid(final String activitySid) { this.activitySid = activitySid; return this; } /** * The attributes. * * @param attributes The attributes * @return this */ public WorkerUpdater setAttributes(final String attributes) { this.attributes = attributes; return this; } /** * The friendly_name. * * @param friendlyName The friendly_name * @return this */ public WorkerUpdater setFriendlyName(final String friendlyName) { this.friendlyName = friendlyName; return this; } /** * The reject_pending_reservations. * * @param rejectPendingReservations The reject_pending_reservations * @return this */ public WorkerUpdater setRejectPendingReservations(final Boolean rejectPendingReservations) { this.rejectPendingReservations = rejectPendingReservations; return this; } /** * Make the request to the Twilio API to perform the update. 
* * @param client TwilioRestClient with which to make the request * @return Updated Worker */ @Override @SuppressWarnings("checkstyle:linelength") public Worker update(final TwilioRestClient client) { Request request = new Request( HttpMethod.POST, Domains.TASKROUTER.toString(), "/v1/Workspaces/" + this.pathWorkspaceSid + "/Workers/" + this.pathSid + "", client.getRegion() ); addPostParams(request); Response response = client.request(request); if (response == null) { throw new ApiConnectionException("Worker update failed: Unable to connect to server"); } else if (!TwilioRestClient.SUCCESS.apply(response.getStatusCode())) { RestException restException = RestException.fromJson(response.getStream(), client.getObjectMapper()); if (restException == null) { throw new ApiException("Server Error, no content"); } throw new ApiException( restException.getMessage(), restException.getCode(), restException.getMoreInfo(), restException.getStatus(), null ); } return Worker.fromJson(response.getStream(), client.getObjectMapper()); } /** * Add the requested post parameters to the Request. * * @param request Request to add post params to */ private void addPostParams(final Request request) { if (activitySid != null) { request.addPostParam("ActivitySid", activitySid); } if (attributes != null) { request.addPostParam("Attributes", attributes); } if (friendlyName != null) { request.addPostParam("FriendlyName", friendlyName); } if (rejectPendingReservations != null) { request.addPostParam("RejectPendingReservations", rejectPendingReservations.toString()); } } }
3e11ad25329aede1bcea52dd447ce2848c6209f5
11,898
java
Java
Tema 16 - Mapas/Ejercicios/Tema 16 - Ej1/B4J/Objects/shell/src/b4j/example/b4xmainpage_subs_0.java
Lamashino/Teaching-B4J
99862588df4c97e0aad6a8a97b1b2cf5202bd165
[ "CC-BY-4.0" ]
2
2021-03-30T20:21:58.000Z
2021-07-07T08:41:58.000Z
Tema 16 - Mapas/Ejercicios/Tema 16 - Ej1/B4J/Objects/shell/src/b4j/example/b4xmainpage_subs_0.java
Lamashino/Teaching-B4J
99862588df4c97e0aad6a8a97b1b2cf5202bd165
[ "CC-BY-4.0" ]
null
null
null
Tema 16 - Mapas/Ejercicios/Tema 16 - Ej1/B4J/Objects/shell/src/b4j/example/b4xmainpage_subs_0.java
Lamashino/Teaching-B4J
99862588df4c97e0aad6a8a97b1b2cf5202bd165
[ "CC-BY-4.0" ]
null
null
null
61.96875
324
0.749958
7,462
package b4j.example; import anywheresoftware.b4a.BA; import anywheresoftware.b4a.pc.*; public class b4xmainpage_subs_0 { public static RemoteObject _b4xpage_created(RemoteObject __ref,RemoteObject _root1) throws Exception{ try { Debug.PushSubsStack("B4XPage_Created (b4xmainpage) ","b4xmainpage",1,__ref.getField(false, "ba"),__ref,22); if (RapidSub.canDelegate("b4xpage_created")) { return __ref.runUserSub(false, "b4xmainpage","b4xpage_created", __ref, _root1);} RemoteObject _país = RemoteObject.createImmutable(""); RemoteObject _capital = RemoteObject.createImmutable(""); Debug.locals.put("Root1", _root1); BA.debugLineNum = 22;BA.debugLine="Private Sub B4XPage_Created (Root1 As B4XView)"; Debug.ShouldStop(2097152); BA.debugLineNum = 23;BA.debugLine="Root = Root1"; Debug.ShouldStop(4194304); __ref.setField ("_root" /*RemoteObject*/ ,_root1); BA.debugLineNum = 24;BA.debugLine="Root.LoadLayout(\"MainPage\")"; Debug.ShouldStop(8388608); __ref.getField(false,"_root" /*RemoteObject*/ ).runVoidMethodAndSync ("LoadLayout",(Object)(RemoteObject.createImmutable("MainPage")),__ref.getField(false, "ba")); BA.debugLineNum = 25;BA.debugLine="Países.Initialize"; Debug.ShouldStop(16777216); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Initialize"); BA.debugLineNum = 27;BA.debugLine="Países.Put(\"Cuba\", \"La Habana\")"; Debug.ShouldStop(67108864); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Cuba"))),(Object)((RemoteObject.createImmutable("La Habana")))); BA.debugLineNum = 28;BA.debugLine="Países.Put(\"Chipre\", \"Nicosia\")"; Debug.ShouldStop(134217728); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Chipre"))),(Object)((RemoteObject.createImmutable("Nicosia")))); BA.debugLineNum = 29;BA.debugLine="Países.Put(\"Chequia\", \"Praga\")"; Debug.ShouldStop(268435456); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod 
("Put",(Object)(RemoteObject.createImmutable(("Chequia"))),(Object)((RemoteObject.createImmutable("Praga")))); BA.debugLineNum = 30;BA.debugLine="Países.Put(\"Egipto\", \"El Cairo\")"; Debug.ShouldStop(536870912); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Egipto"))),(Object)((RemoteObject.createImmutable("El Cairo")))); BA.debugLineNum = 31;BA.debugLine="Países.Put(\"Kenia\", \"Nairobi\")"; Debug.ShouldStop(1073741824); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Kenia"))),(Object)((RemoteObject.createImmutable("Nairobi")))); BA.debugLineNum = 32;BA.debugLine="Países.Put(\"México\", \"Ciudad de México\")"; Debug.ShouldStop(-2147483648); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("México"))),(Object)((RemoteObject.createImmutable("Ciudad de México")))); BA.debugLineNum = 33;BA.debugLine="Países.Put(\"Perú\", \"Lima\")"; Debug.ShouldStop(1); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Perú"))),(Object)((RemoteObject.createImmutable("Lima")))); BA.debugLineNum = 34;BA.debugLine="Países.Put(\"Vietnam\", \"Hanoi\")"; Debug.ShouldStop(2); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Vietnam"))),(Object)((RemoteObject.createImmutable("Hanoi")))); BA.debugLineNum = 35;BA.debugLine="Países.Put(\"Portugal \", \"Lisboa\")"; Debug.ShouldStop(4); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Portugal "))),(Object)((RemoteObject.createImmutable("Lisboa")))); BA.debugLineNum = 39;BA.debugLine="Países.Put(\"Japón\", \"Tokio\")"; Debug.ShouldStop(64); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod 
("Put",(Object)(RemoteObject.createImmutable(("Japón"))),(Object)((RemoteObject.createImmutable("Tokio")))); BA.debugLineNum = 40;BA.debugLine="Países.Put(\"Barbados\", \"Bridgetown\")"; Debug.ShouldStop(128); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Barbados"))),(Object)((RemoteObject.createImmutable("Bridgetown")))); BA.debugLineNum = 41;BA.debugLine="Países.Put(\"Fiji\", \"Suva\")"; Debug.ShouldStop(256); __ref.getField(false,"_países" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(RemoteObject.createImmutable(("Fiji"))),(Object)((RemoteObject.createImmutable("Suva")))); BA.debugLineNum = 45;BA.debugLine="Log(\"Mostrar países y sus capitales\")"; Debug.ShouldStop(4096); b4xmainpage.__c.runVoidMethod ("Log",(Object)(RemoteObject.createImmutable("Mostrar países y sus capitales"))); BA.debugLineNum = 46;BA.debugLine="For Each país As String In Países.Keys"; Debug.ShouldStop(8192); { final RemoteObject group17 = __ref.getField(false,"_países" /*RemoteObject*/ ).runMethod(false,"Keys"); final int groupLen17 = group17.runMethod(true,"getSize").<Integer>get() ;int index17 = 0; ; for (; index17 < groupLen17;index17++){ _país = BA.ObjectToString(group17.runMethod(false,"Get",index17));Debug.locals.put("país", _país); Debug.locals.put("país", _país); BA.debugLineNum = 47;BA.debugLine="Log(país & \" \" & Países.Get(país))"; Debug.ShouldStop(16384); b4xmainpage.__c.runVoidMethod ("Log",(Object)(RemoteObject.concat(_país,RemoteObject.createImmutable(" "),__ref.getField(false,"_países" /*RemoteObject*/ ).runMethod(false,"Get",(Object)((_país)))))); } }Debug.locals.put("país", _país); ; BA.debugLineNum = 52;BA.debugLine="Capitales.Initialize"; Debug.ShouldStop(524288); __ref.getField(false,"_capitales" /*RemoteObject*/ ).runVoidMethod ("Initialize"); BA.debugLineNum = 53;BA.debugLine="For Each país As String In Países.Keys"; Debug.ShouldStop(1048576); { final RemoteObject group21 = 
__ref.getField(false,"_países" /*RemoteObject*/ ).runMethod(false,"Keys"); final int groupLen21 = group21.runMethod(true,"getSize").<Integer>get() ;int index21 = 0; ; for (; index21 < groupLen21;index21++){ _país = BA.ObjectToString(group21.runMethod(false,"Get",index21));Debug.locals.put("país", _país); Debug.locals.put("país", _país); BA.debugLineNum = 54;BA.debugLine="Capitales.Put(Países.Get(país), país)"; Debug.ShouldStop(2097152); __ref.getField(false,"_capitales" /*RemoteObject*/ ).runVoidMethod ("Put",(Object)(__ref.getField(false,"_países" /*RemoteObject*/ ).runMethod(false,"Get",(Object)((_país)))),(Object)((_país))); } }Debug.locals.put("país", _país); ; BA.debugLineNum = 56;BA.debugLine="Log(\"Mostrar capitales y sus países\")"; Debug.ShouldStop(8388608); b4xmainpage.__c.runVoidMethod ("Log",(Object)(RemoteObject.createImmutable("Mostrar capitales y sus países"))); BA.debugLineNum = 57;BA.debugLine="For Each Capital As String In Capitales.Keys"; Debug.ShouldStop(16777216); { final RemoteObject group25 = __ref.getField(false,"_capitales" /*RemoteObject*/ ).runMethod(false,"Keys"); final int groupLen25 = group25.runMethod(true,"getSize").<Integer>get() ;int index25 = 0; ; for (; index25 < groupLen25;index25++){ _capital = BA.ObjectToString(group25.runMethod(false,"Get",index25));Debug.locals.put("Capital", _capital); Debug.locals.put("Capital", _capital); BA.debugLineNum = 58;BA.debugLine="Log(Capital & \" \" & Capitales.Get(Capital))"; Debug.ShouldStop(33554432); b4xmainpage.__c.runVoidMethod ("Log",(Object)(RemoteObject.concat(_capital,RemoteObject.createImmutable(" "),__ref.getField(false,"_capitales" /*RemoteObject*/ ).runMethod(false,"Get",(Object)((_capital)))))); } }Debug.locals.put("Capital", _capital); ; BA.debugLineNum = 61;BA.debugLine="End Sub"; Debug.ShouldStop(268435456); return RemoteObject.createImmutable(""); } catch (Exception e) { throw Debug.ErrorCaught(e); } finally { Debug.PopSubsStack(); }} public static RemoteObject 
_btnshow_click(RemoteObject __ref) throws Exception{ try { Debug.PushSubsStack("btnShow_Click (b4xmainpage) ","b4xmainpage",1,__ref.getField(false, "ba"),__ref,65); if (RapidSub.canDelegate("btnshow_click")) { return __ref.runUserSub(false, "b4xmainpage","btnshow_click", __ref);} BA.debugLineNum = 65;BA.debugLine="Private Sub btnShow_Click"; Debug.ShouldStop(1); BA.debugLineNum = 66;BA.debugLine="If Capitales.ContainsKey(txtCapital.Text) Then"; Debug.ShouldStop(2); if (__ref.getField(false,"_capitales" /*RemoteObject*/ ).runMethod(true,"ContainsKey",(Object)((__ref.getField(false,"_txtcapital" /*RemoteObject*/ ).runClassMethod (b4j.example.b4xfloattextfield.class, "_gettext" /*RemoteObject*/ )))).<Boolean>get().booleanValue()) { BA.debugLineNum = 67;BA.debugLine="lblCountry.Text = Capitales.Get(txtCapital.Text)"; Debug.ShouldStop(4); __ref.getField(false,"_lblcountry" /*RemoteObject*/ ).runMethod(true,"setText",BA.ObjectToString(__ref.getField(false,"_capitales" /*RemoteObject*/ ).runMethod(false,"Get",(Object)((__ref.getField(false,"_txtcapital" /*RemoteObject*/ ).runClassMethod (b4j.example.b4xfloattextfield.class, "_gettext" /*RemoteObject*/ )))))); }else { BA.debugLineNum = 69;BA.debugLine="lblCountry.Text = \"No conozco \" & txtCapital.tex"; Debug.ShouldStop(16); __ref.getField(false,"_lblcountry" /*RemoteObject*/ ).runMethod(true,"setText",RemoteObject.concat(RemoteObject.createImmutable("No conozco "),__ref.getField(false,"_txtcapital" /*RemoteObject*/ ).runClassMethod (b4j.example.b4xfloattextfield.class, "_gettext" /*RemoteObject*/ ))); }; BA.debugLineNum = 71;BA.debugLine="End Sub"; Debug.ShouldStop(64); return RemoteObject.createImmutable(""); } catch (Exception e) { throw Debug.ErrorCaught(e); } finally { Debug.PopSubsStack(); }} public static RemoteObject _class_globals(RemoteObject __ref) throws Exception{ //BA.debugLineNum = 8;BA.debugLine="Sub Class_Globals"; //BA.debugLineNum = 9;BA.debugLine="Private Root As B4XView"; b4xmainpage._root = 
RemoteObject.createNew ("anywheresoftware.b4a.objects.B4XViewWrapper");__ref.setField("_root",b4xmainpage._root); //BA.debugLineNum = 10;BA.debugLine="Private xui As XUI"; b4xmainpage._xui = RemoteObject.createNew ("anywheresoftware.b4a.objects.B4XViewWrapper.XUI");__ref.setField("_xui",b4xmainpage._xui); //BA.debugLineNum = 11;BA.debugLine="Private Países As Map"; b4xmainpage._países = RemoteObject.createNew ("anywheresoftware.b4a.objects.collections.Map");__ref.setField("_países",b4xmainpage._países); //BA.debugLineNum = 12;BA.debugLine="Private Capitales As Map"; b4xmainpage._capitales = RemoteObject.createNew ("anywheresoftware.b4a.objects.collections.Map");__ref.setField("_capitales",b4xmainpage._capitales); //BA.debugLineNum = 13;BA.debugLine="Private lblCountry As Label"; b4xmainpage._lblcountry = RemoteObject.createNew ("anywheresoftware.b4j.objects.LabelWrapper");__ref.setField("_lblcountry",b4xmainpage._lblcountry); //BA.debugLineNum = 14;BA.debugLine="Private txtCapital As B4XFloatTextField"; b4xmainpage._txtcapital = RemoteObject.createNew ("b4j.example.b4xfloattextfield");__ref.setField("_txtcapital",b4xmainpage._txtcapital); //BA.debugLineNum = 15;BA.debugLine="End Sub"; return RemoteObject.createImmutable(""); } public static RemoteObject _initialize(RemoteObject __ref,RemoteObject _ba) throws Exception{ try { Debug.PushSubsStack("Initialize (b4xmainpage) ","b4xmainpage",1,__ref.getField(false, "ba"),__ref,17); if (RapidSub.canDelegate("initialize")) { return __ref.runUserSub(false, "b4xmainpage","initialize", __ref, _ba);} __ref.runVoidMethodAndSync("innerInitializeHelper", _ba); Debug.locals.put("ba", _ba); BA.debugLineNum = 17;BA.debugLine="Public Sub Initialize"; Debug.ShouldStop(65536); BA.debugLineNum = 19;BA.debugLine="End Sub"; Debug.ShouldStop(262144); return RemoteObject.createImmutable(""); } catch (Exception e) { throw Debug.ErrorCaught(e); } finally { Debug.PopSubsStack(); }} }
3e11adaeee7aa42710ddddd86c4b3f691944a0d9
1,395
java
Java
src/main/java/ch/crearex/json/schema/builder/StringTypeBuilder.java
crearex/Json
0ef1ba80e2dbcf0a3be4447703d2014ac84f7433
[ "Apache-2.0" ]
1
2018-01-15T18:47:39.000Z
2018-01-15T18:47:39.000Z
src/main/java/ch/crearex/json/schema/builder/StringTypeBuilder.java
crearex/Json
0ef1ba80e2dbcf0a3be4447703d2014ac84f7433
[ "Apache-2.0" ]
null
null
null
src/main/java/ch/crearex/json/schema/builder/StringTypeBuilder.java
crearex/Json
0ef1ba80e2dbcf0a3be4447703d2014ac84f7433
[ "Apache-2.0" ]
null
null
null
36.710526
113
0.817204
7,463
package ch.crearex.json.schema.builder; import ch.crearex.json.dom.JsonObject; import ch.crearex.json.schema.SchemaConstants; import ch.crearex.json.schema.constraints.MaxLengthConstraint; import ch.crearex.json.schema.constraints.MinLengthConstraint; import ch.crearex.json.schema.constraints.RegexConstraint; public class StringTypeBuilder implements TypeBuilder { public StringTypeBuilder() { } @Override public StringType build(JsonObject typeDefinition) { StringType type = new StringType( typeDefinition.getString(SchemaConstants.TITLE_NAME, ""), typeDefinition.getString(SchemaConstants.DESCRIPTION_NAME, "")); if(typeDefinition.isString(SchemaConstants.REGEX_CONSTRAINT)) { type.addConstraint(new RegexConstraint(typeDefinition.getString(SchemaConstants.REGEX_CONSTRAINT))); } if(typeDefinition.isString(SchemaConstants.PATTERN_CONSTRAINT)) { type.addConstraint(new RegexConstraint(typeDefinition.getString(SchemaConstants.PATTERN_CONSTRAINT))); } if(typeDefinition.isNumber(SchemaConstants.MAX_LENGTH_CONSTRAINT)) { type.addConstraint(new MaxLengthConstraint(typeDefinition.getInteger(SchemaConstants.MAX_LENGTH_CONSTRAINT))); } if(typeDefinition.isNumber(SchemaConstants.MIN_LENGTH_CONSTRAINT)) { type.addConstraint(new MinLengthConstraint(typeDefinition.getInteger(SchemaConstants.MIN_LENGTH_CONSTRAINT))); } return type; } }
3e11ade1d58667d2f05fa4f998697c38bd1c1d67
701
java
Java
e-permit/src/main/java/epermit/models/dtos/PermitDto.java
e-permit/e-permit-java
8a48be3f3dc96e6babe75bf8b8913fb50ffdb880
[ "MIT" ]
null
null
null
e-permit/src/main/java/epermit/models/dtos/PermitDto.java
e-permit/e-permit-java
8a48be3f3dc96e6babe75bf8b8913fb50ffdb880
[ "MIT" ]
null
null
null
e-permit/src/main/java/epermit/models/dtos/PermitDto.java
e-permit/e-permit-java
8a48be3f3dc96e6babe75bf8b8913fb50ffdb880
[ "MIT" ]
null
null
null
15.23913
39
0.718973
7,464
package epermit.models.dtos;

import java.util.Date;
import java.util.UUID;
import epermit.models.enums.PermitType;
import lombok.Data;

/**
 * Data-transfer object carrying the details of a single e-permit as exposed
 * by the API. Getters, setters, {@code equals}/{@code hashCode} and
 * {@code toString} are generated by Lombok's {@code @Data}.
 */
@Data
public class PermitDto {
    // Internal identifier of the permit record.
    private UUID id;
    // Party that issued the permit — presumably a country/authority code; TODO confirm.
    private String issuer;
    // Party the permit was issued for — presumably the counterpart authority; TODO confirm.
    private String issuedFor;
    // Business-level permit identifier (distinct from the internal UUID).
    private String permitId;
    // QR-code payload for the printed/scanned permit.
    private String qrCode;
    // Sequential serial number within the permit quota.
    private int serialNumber;
    // Category of the permit (enum declared in epermit.models.enums).
    private PermitType permitType;
    // Year the permit is valid for.
    private int permitYear;
    // Issue timestamp as a string — format not visible here; TODO confirm format.
    private String issuedAt;
    // Expiry timestamp as a string — format not visible here; TODO confirm format.
    private String expireAt;
    // Vehicle plate number the permit is bound to.
    private String plateNumber;
    // Name of the company holding the permit.
    private String companyName;
    // Identifier of the company holding the permit.
    private String companyId;
    // Raw claims payload associated with the permit.
    private String claims;
    // Whether the permit has been used.
    private boolean used;
    // When the permit was used (null until used).
    private Date usedAt;
    // Whether the permit has been revoked.
    private boolean revoked;
    // When the permit was revoked (null unless revoked).
    private Date revokedAt;
}
3e11ae9629eef4320d7e63b61a9f37c9fb0afcc9
18,708
java
Java
igc-clientlibrary/src/main/java/org/odpi/egeria/connectors/ibm/igc/clientlibrary/model/generated/v11502/Application.java
bramwelt/egeria-connector-ibm-information-server
e0ae48e7c9d0aca1b72100cc091c54a7853488dd
[ "Apache-2.0" ]
null
null
null
igc-clientlibrary/src/main/java/org/odpi/egeria/connectors/ibm/igc/clientlibrary/model/generated/v11502/Application.java
bramwelt/egeria-connector-ibm-information-server
e0ae48e7c9d0aca1b72100cc091c54a7853488dd
[ "Apache-2.0" ]
null
null
null
igc-clientlibrary/src/main/java/org/odpi/egeria/connectors/ibm/igc/clientlibrary/model/generated/v11502/Application.java
bramwelt/egeria-connector-ibm-information-server
e0ae48e7c9d0aca1b72100cc091c54a7853488dd
[ "Apache-2.0" ]
1
2019-08-15T22:13:39.000Z
2019-08-15T22:13:39.000Z
52.111421
241
0.705474
7,465
/* SPDX-License-Identifier: Apache-2.0 */ /* Copyright Contributors to the ODPi Egeria project. */ package org.odpi.egeria.connectors.ibm.igc.clientlibrary.model.generated.v11502; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonTypeName; import javax.annotation.Generated; import org.odpi.egeria.connectors.ibm.igc.clientlibrary.model.common.*; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Arrays; import java.util.Date; import java.util.List; import java.util.ArrayList; /** * POJO for the {@code application} asset type in IGC, displayed as '{@literal Application}' in the IGC UI. * <br><br> * (this code has been generated based on out-of-the-box IGC metadata types; * if modifications are needed, eg. to handle custom attributes, * extending from this class in your own custom class is the best approach.) */ @Generated("org.odpi.egeria.connectors.ibm.igc.clientlibrary.model.IGCRestModelGenerator") @JsonIgnoreProperties(ignoreUnknown=true) @JsonTypeName("application") public class Application extends Reference { public static String getIgcTypeDisplayName() { return "Application"; } /** * The {@code name} property, displayed as '{@literal Name}' in the IGC UI. */ protected String name; /** * The {@code short_description} property, displayed as '{@literal Short Description}' in the IGC UI. */ protected String short_description; /** * The {@code long_description} property, displayed as '{@literal Long Description}' in the IGC UI. */ protected String long_description; /** * The {@code labels} property, displayed as '{@literal Labels}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link Label} objects. */ protected ReferenceList labels; /** * The {@code stewards} property, displayed as '{@literal Stewards}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link AsclSteward} objects. 
*/ protected ReferenceList stewards; /** * The {@code assigned_to_terms} property, displayed as '{@literal Assigned to Terms}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link Term} objects. */ protected ReferenceList assigned_to_terms; /** * The {@code implements_rules} property, displayed as '{@literal Implements Rules}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationGovernanceRule} objects. */ protected ReferenceList implements_rules; /** * The {@code governed_by_rules} property, displayed as '{@literal Governed by Rules}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationGovernanceRule} objects. */ protected ReferenceList governed_by_rules; /** * The {@code object_types} property, displayed as '{@literal Object Types}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link ObjectType} objects. */ protected ReferenceList object_types; /** * The {@code alias_(business_name)} property, displayed as '{@literal Alias (Business Name)}' in the IGC UI. */ @JsonProperty("alias_(business_name)") protected String alias__business_name_; /** * The {@code reads_from_(static)} property, displayed as '{@literal Reads from (Static)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("reads_from_(static)") protected ReferenceList reads_from__static_; /** * The {@code writes_to_(static)} property, displayed as '{@literal Writes to (Static)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("writes_to_(static)") protected ReferenceList writes_to__static_; /** * The {@code reads_from_(design)} property, displayed as '{@literal Reads from (Design)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. 
*/ @JsonProperty("reads_from_(design)") protected ReferenceList reads_from__design_; /** * The {@code writes_to_(design)} property, displayed as '{@literal Writes to (Design)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("writes_to_(design)") protected ReferenceList writes_to__design_; /** * The {@code reads_from_(operational)} property, displayed as '{@literal Reads from (Operational)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("reads_from_(operational)") protected ReferenceList reads_from__operational_; /** * The {@code writes_to_(operational)} property, displayed as '{@literal Writes to (Operational)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("writes_to_(operational)") protected ReferenceList writes_to__operational_; /** * The {@code reads_from_(user_defined)} property, displayed as '{@literal Reads from (User-Defined)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("reads_from_(user_defined)") protected ReferenceList reads_from__user_defined_; /** * The {@code writes_to_(user_defined)} property, displayed as '{@literal Writes to (User-Defined)}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ @JsonProperty("writes_to_(user_defined)") protected ReferenceList writes_to__user_defined_; /** * The {@code impacted_by} property, displayed as '{@literal Impacted by}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. */ protected ReferenceList impacted_by; /** * The {@code impacts_on} property, displayed as '{@literal Impacts on}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link InformationAsset} objects. 
*/ protected ReferenceList impacts_on; /** * The {@code include_for_business_lineage} property, displayed as '{@literal Include for Business Lineage}' in the IGC UI. */ protected Boolean include_for_business_lineage; /** * The {@code blueprint_elements} property, displayed as '{@literal Blueprint Elements}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link BlueprintElementLink} objects. */ protected ReferenceList blueprint_elements; /** * The {@code in_collections} property, displayed as '{@literal In Collections}' in the IGC UI. * <br><br> * Will be a {@link ReferenceList} of {@link Collection} objects. */ protected ReferenceList in_collections; /** * The {@code created_by} property, displayed as '{@literal Created By}' in the IGC UI. */ protected String created_by; /** * The {@code created_on} property, displayed as '{@literal Created On}' in the IGC UI. */ protected Date created_on; /** * The {@code modified_by} property, displayed as '{@literal Modified By}' in the IGC UI. */ protected String modified_by; /** * The {@code modified_on} property, displayed as '{@literal Modified On}' in the IGC UI. 
*/ protected Date modified_on; /** @see #name */ @JsonProperty("name") public String getTheName() { return this.name; } /** @see #name */ @JsonProperty("name") public void setTheName(String name) { this.name = name; } /** @see #short_description */ @JsonProperty("short_description") public String getShortDescription() { return this.short_description; } /** @see #short_description */ @JsonProperty("short_description") public void setShortDescription(String short_description) { this.short_description = short_description; } /** @see #long_description */ @JsonProperty("long_description") public String getLongDescription() { return this.long_description; } /** @see #long_description */ @JsonProperty("long_description") public void setLongDescription(String long_description) { this.long_description = long_description; } /** @see #labels */ @JsonProperty("labels") public ReferenceList getLabels() { return this.labels; } /** @see #labels */ @JsonProperty("labels") public void setLabels(ReferenceList labels) { this.labels = labels; } /** @see #stewards */ @JsonProperty("stewards") public ReferenceList getStewards() { return this.stewards; } /** @see #stewards */ @JsonProperty("stewards") public void setStewards(ReferenceList stewards) { this.stewards = stewards; } /** @see #assigned_to_terms */ @JsonProperty("assigned_to_terms") public ReferenceList getAssignedToTerms() { return this.assigned_to_terms; } /** @see #assigned_to_terms */ @JsonProperty("assigned_to_terms") public void setAssignedToTerms(ReferenceList assigned_to_terms) { this.assigned_to_terms = assigned_to_terms; } /** @see #implements_rules */ @JsonProperty("implements_rules") public ReferenceList getImplementsRules() { return this.implements_rules; } /** @see #implements_rules */ @JsonProperty("implements_rules") public void setImplementsRules(ReferenceList implements_rules) { this.implements_rules = implements_rules; } /** @see #governed_by_rules */ @JsonProperty("governed_by_rules") public ReferenceList 
getGovernedByRules() { return this.governed_by_rules; } /** @see #governed_by_rules */ @JsonProperty("governed_by_rules") public void setGovernedByRules(ReferenceList governed_by_rules) { this.governed_by_rules = governed_by_rules; } /** @see #object_types */ @JsonProperty("object_types") public ReferenceList getObjectTypes() { return this.object_types; } /** @see #object_types */ @JsonProperty("object_types") public void setObjectTypes(ReferenceList object_types) { this.object_types = object_types; } /** @see #alias__business_name_ */ @JsonProperty("alias_(business_name)") public String getAliasBusinessName() { return this.alias__business_name_; } /** @see #alias__business_name_ */ @JsonProperty("alias_(business_name)") public void setAliasBusinessName(String alias__business_name_) { this.alias__business_name_ = alias__business_name_; } /** @see #reads_from__static_ */ @JsonProperty("reads_from_(static)") public ReferenceList getReadsFromStatic() { return this.reads_from__static_; } /** @see #reads_from__static_ */ @JsonProperty("reads_from_(static)") public void setReadsFromStatic(ReferenceList reads_from__static_) { this.reads_from__static_ = reads_from__static_; } /** @see #writes_to__static_ */ @JsonProperty("writes_to_(static)") public ReferenceList getWritesToStatic() { return this.writes_to__static_; } /** @see #writes_to__static_ */ @JsonProperty("writes_to_(static)") public void setWritesToStatic(ReferenceList writes_to__static_) { this.writes_to__static_ = writes_to__static_; } /** @see #reads_from__design_ */ @JsonProperty("reads_from_(design)") public ReferenceList getReadsFromDesign() { return this.reads_from__design_; } /** @see #reads_from__design_ */ @JsonProperty("reads_from_(design)") public void setReadsFromDesign(ReferenceList reads_from__design_) { this.reads_from__design_ = reads_from__design_; } /** @see #writes_to__design_ */ @JsonProperty("writes_to_(design)") public ReferenceList getWritesToDesign() { return this.writes_to__design_; } /** 
@see #writes_to__design_ */ @JsonProperty("writes_to_(design)") public void setWritesToDesign(ReferenceList writes_to__design_) { this.writes_to__design_ = writes_to__design_; } /** @see #reads_from__operational_ */ @JsonProperty("reads_from_(operational)") public ReferenceList getReadsFromOperational() { return this.reads_from__operational_; } /** @see #reads_from__operational_ */ @JsonProperty("reads_from_(operational)") public void setReadsFromOperational(ReferenceList reads_from__operational_) { this.reads_from__operational_ = reads_from__operational_; } /** @see #writes_to__operational_ */ @JsonProperty("writes_to_(operational)") public ReferenceList getWritesToOperational() { return this.writes_to__operational_; } /** @see #writes_to__operational_ */ @JsonProperty("writes_to_(operational)") public void setWritesToOperational(ReferenceList writes_to__operational_) { this.writes_to__operational_ = writes_to__operational_; } /** @see #reads_from__user_defined_ */ @JsonProperty("reads_from_(user_defined)") public ReferenceList getReadsFromUserDefined() { return this.reads_from__user_defined_; } /** @see #reads_from__user_defined_ */ @JsonProperty("reads_from_(user_defined)") public void setReadsFromUserDefined(ReferenceList reads_from__user_defined_) { this.reads_from__user_defined_ = reads_from__user_defined_; } /** @see #writes_to__user_defined_ */ @JsonProperty("writes_to_(user_defined)") public ReferenceList getWritesToUserDefined() { return this.writes_to__user_defined_; } /** @see #writes_to__user_defined_ */ @JsonProperty("writes_to_(user_defined)") public void setWritesToUserDefined(ReferenceList writes_to__user_defined_) { this.writes_to__user_defined_ = writes_to__user_defined_; } /** @see #impacted_by */ @JsonProperty("impacted_by") public ReferenceList getImpactedBy() { return this.impacted_by; } /** @see #impacted_by */ @JsonProperty("impacted_by") public void setImpactedBy(ReferenceList impacted_by) { this.impacted_by = impacted_by; } /** @see 
#impacts_on */ @JsonProperty("impacts_on") public ReferenceList getImpactsOn() { return this.impacts_on; } /** @see #impacts_on */ @JsonProperty("impacts_on") public void setImpactsOn(ReferenceList impacts_on) { this.impacts_on = impacts_on; } /** @see #include_for_business_lineage */ @JsonProperty("include_for_business_lineage") public Boolean getIncludeForBusinessLineage() { return this.include_for_business_lineage; } /** @see #include_for_business_lineage */ @JsonProperty("include_for_business_lineage") public void setIncludeForBusinessLineage(Boolean include_for_business_lineage) { this.include_for_business_lineage = include_for_business_lineage; } /** @see #blueprint_elements */ @JsonProperty("blueprint_elements") public ReferenceList getBlueprintElements() { return this.blueprint_elements; } /** @see #blueprint_elements */ @JsonProperty("blueprint_elements") public void setBlueprintElements(ReferenceList blueprint_elements) { this.blueprint_elements = blueprint_elements; } /** @see #in_collections */ @JsonProperty("in_collections") public ReferenceList getInCollections() { return this.in_collections; } /** @see #in_collections */ @JsonProperty("in_collections") public void setInCollections(ReferenceList in_collections) { this.in_collections = in_collections; } /** @see #created_by */ @JsonProperty("created_by") public String getCreatedBy() { return this.created_by; } /** @see #created_by */ @JsonProperty("created_by") public void setCreatedBy(String created_by) { this.created_by = created_by; } /** @see #created_on */ @JsonProperty("created_on") public Date getCreatedOn() { return this.created_on; } /** @see #created_on */ @JsonProperty("created_on") public void setCreatedOn(Date created_on) { this.created_on = created_on; } /** @see #modified_by */ @JsonProperty("modified_by") public String getModifiedBy() { return this.modified_by; } /** @see #modified_by */ @JsonProperty("modified_by") public void setModifiedBy(String modified_by) { this.modified_by = 
modified_by; } /** @see #modified_on */ @JsonProperty("modified_on") public Date getModifiedOn() { return this.modified_on; } /** @see #modified_on */ @JsonProperty("modified_on") public void setModifiedOn(Date modified_on) { this.modified_on = modified_on; } public static Boolean canBeCreated() { return false; } public static Boolean includesModificationDetails() { return true; } private static final List<String> NON_RELATIONAL_PROPERTIES = Arrays.asList( "name", "short_description", "long_description", "alias_(business_name)", "include_for_business_lineage", "created_by", "created_on", "modified_by", "modified_on" ); private static final List<String> STRING_PROPERTIES = Arrays.asList( "name", "short_description", "long_description", "alias_(business_name)", "created_by", "modified_by" ); private static final List<String> PAGED_RELATIONAL_PROPERTIES = Arrays.asList( "labels", "stewards", "assigned_to_terms", "implements_rules", "governed_by_rules", "object_types", "reads_from_(static)", "writes_to_(static)", "reads_from_(design)", "writes_to_(design)", "reads_from_(operational)", "writes_to_(operational)", "reads_from_(user_defined)", "writes_to_(user_defined)", "impacted_by", "impacts_on", "blueprint_elements", "in_collections" ); private static final List<String> ALL_PROPERTIES = Arrays.asList( "name", "short_description", "long_description", "labels", "stewards", "assigned_to_terms", "implements_rules", "governed_by_rules", "object_types", "alias_(business_name)", "reads_from_(static)", "writes_to_(static)", "reads_from_(design)", "writes_to_(design)", "reads_from_(operational)", "writes_to_(operational)", "reads_from_(user_defined)", "writes_to_(user_defined)", "impacted_by", "impacts_on", "include_for_business_lineage", "blueprint_elements", "in_collections", "created_by", "created_on", "modified_by", "modified_on" ); public static List<String> getNonRelationshipProperties() { return NON_RELATIONAL_PROPERTIES; } public static List<String> 
getStringProperties() { return STRING_PROPERTIES; } public static List<String> getPagedRelationshipProperties() { return PAGED_RELATIONAL_PROPERTIES; } public static List<String> getAllProperties() { return ALL_PROPERTIES; } public static Boolean isApplication(Object obj) { return (obj.getClass() == Application.class); } }
3e11afafb42f3f02bfe4812acac3f8d417683bb2
5,575
java
Java
assertj-swing/src/main/java/org/assertj/swing/monitor/Windows.java
DaveBrad/assertj-swing
0d74888990c5124f6a2dee90e0d848da0125f0b2
[ "Apache-2.0" ]
70
2015-01-19T08:40:59.000Z
2020-06-20T12:59:57.000Z
assertj-swing/src/main/java/org/assertj/swing/monitor/Windows.java
DaveBrad/assertj-swing
0d74888990c5124f6a2dee90e0d848da0125f0b2
[ "Apache-2.0" ]
143
2015-01-04T01:43:55.000Z
2020-09-13T07:42:45.000Z
assertj-swing/src/main/java/org/assertj/swing/monitor/Windows.java
DaveBrad/assertj-swing
0d74888990c5124f6a2dee90e0d848da0125f0b2
[ "Apache-2.0" ]
47
2015-01-26T12:41:27.000Z
2020-08-27T13:39:13.000Z
27.59901
120
0.658117
7,466
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * Copyright 2012-2018 the original author or authors.
 */
package org.assertj.swing.monitor;

import static org.assertj.swing.util.Maps.newWeakHashMap;

import java.awt.Component;
import java.awt.Window;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;

import javax.annotation.Nonnull;
import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.ThreadSafe;

import org.assertj.core.util.VisibleForTesting;
import org.assertj.swing.annotation.RunsInCurrentThread;

/**
 * Information collected by the monitors in this package.
 * <p>
 * Tracks the lifecycle state of each {@link Window} in four weak maps
 * (pending / open / closed / hidden), all guarded by a single private lock.
 * A window moves from "pending" to "open" either when it is explicitly marked
 * ready or after {@link #WINDOW_READY_DELAY} ms via a background timer.
 *
 * @author Alex Ruiz
 */
@ThreadSafe
class Windows {
  // Delay (ms) after which a showing window is assumed ready; non-final so tests can shorten it.
  @VisibleForTesting
  static int WINDOW_READY_DELAY = 10000;

  /** {@link Window#isShowing() isShowing} is true but are not yet ready for input. */
  @GuardedBy("lock")
  final Map<Window, TimerTask> pending = newWeakHashMap();

  /** Considered to be ready to use. */
  @GuardedBy("lock")
  final Map<Window, Boolean> open = newWeakHashMap();

  /** Have sent a {@link java.awt.event.WindowEvent#WINDOW_CLOSED WINDOW_CLOSED} event. */
  @GuardedBy("lock")
  final Map<Window, Boolean> closed = newWeakHashMap();

  /** Not visible. */
  @GuardedBy("lock")
  final Map<Window, Boolean> hidden = newWeakHashMap();

  private final Timer windowReadyTimer;
  private final Object lock = new Object();

  Windows() {
    // Daemon timer so it never keeps the JVM alive.
    windowReadyTimer = new Timer("Window Ready Timer", true);
  }

  /**
   * Creates a new {@link WindowVisibilityMonitor} and attaches it to the given {@code Window}.
   *
   * @param target the {@code Window} to attach the new monitor to.
   */
  void attachNewWindowVisibilityMonitor(Window target) {
    WindowVisibilityMonitor monitor = new WindowVisibilityMonitor(this);
    target.addWindowListener(monitor);
    target.addComponentListener(monitor);
  }

  /**
   * Marks the given window as "ready to use" and if not showing, as "hidden."
   *
   * @param w the given window.
   */
  @RunsInCurrentThread
  void markExisting(@Nonnull Window w) {
    synchronized (lock) {
      open.put(w, true);
      if (!w.isShowing()) {
        hidden.put(w, true);
      }
    }
  }

  /**
   * Marks the given window as "hidden."
   *
   * @param w the given window.
   */
  void markAsHidden(@Nonnull Window w) {
    synchronized (lock) {
      hidden.put(w, true);
      // A hidden window can no longer become ready; cancel its pending state.
      removeWindowFrom(w, pending);
    }
  }

  /**
   * Marks the given window as "showing."
   *
   * @param w the given window.
   */
  void markAsShowing(final @Nonnull Window w) {
    synchronized (lock) {
      // Watchdog: assume the window is ready after WINDOW_READY_DELAY ms even
      // if no explicit readiness signal arrives.
      TimerTask task = new TimerTask() {
        @Override
        public void run() {
          markAsReady(w);
        }
      };
      windowReadyTimer.schedule(new ProtectingTimerTask(task), WINDOW_READY_DELAY);
      pending.put(w, task);
    }
  }

  /**
   * Marks the given window as "ready to receive OS-level event input."
   *
   * @param w the given window.
   */
  void markAsReady(@Nonnull Window w) {
    synchronized (lock) {
      // Only windows previously marked as showing (pending) can become ready.
      if (!pending.containsKey(w)) {
        return;
      }
      removeWindowFrom(w, closed, hidden, pending);
      open.put(w, true);
    }
  }

  /**
   * Marks the given window as "closed."
   *
   * @param w the given window.
   */
  void markAsClosed(@Nonnull Window w) {
    synchronized (lock) {
      removeWindowFrom(w, open, hidden, pending);
      closed.put(w, true);
    }
  }

  // Removes the window from every given state map (caller must hold the lock).
  private void removeWindowFrom(Window w, Map<?, ?>... maps) {
    for (Map<?, ?> map : maps) {
      map.remove(w);
    }
  }

  /**
   * Indicates whether the given AWT or Swing {@code Component} is a closed {@code Window}.
   *
   * @param c the given {@code Component}.
   * @return {@code true} if the given {@code Component} is a closed {@code Window}, {@code false} otherwise.
   */
  boolean isClosed(@Nonnull Component c) {
    synchronized (lock) {
      return closed.containsKey(c);
    }
  }

  /**
   * Indicates whether the given {@code Window} is ready to receive OS-level event input.
   *
   * @param w the given {@code Window}.
   * @return {@code true} if the given {@code Window} is ready to receive OS-level event input, {@code false} otherwise.
   */
  boolean isReady(@Nonnull Window w) {
    synchronized (lock) {
      return open.containsKey(w) && !hidden.containsKey(w);
    }
  }

  /**
   * Indicates whether the given {@code Window} is hidden.
   *
   * @param w the given {@code Window}.
   * @return {@code true} if the given {@code Window} is hidden, {@code false} otherwise.
   */
  boolean isHidden(@Nonnull Window w) {
    synchronized (lock) {
      return hidden.containsKey(w);
    }
  }

  /**
   * Indicates the given {@code Window} is showing but not ready to receive OS-level event input.
   *
   * @param w the given {@code Window}.
   * @return {@code true} if the given {@code Window} is showing but not ready to receive OS-level event input,
   *         {@code false} otherwise.
   */
  boolean isShowingButNotReady(@Nonnull Window w) {
    synchronized (lock) {
      return pending.containsKey(w);
    }
  }
}
3e11b108417b1bafb6e2553122cb51dd79b8c864
777
java
Java
daemon-base/blackcoin-daemon/src/test/java/tech/coinbub/daemon/blackcoin/GetNewAddressIT.java
CoinBub/daemon-interfaces
52389af9fc81f8e3a51f84122bde996199f38039
[ "MIT" ]
null
null
null
daemon-base/blackcoin-daemon/src/test/java/tech/coinbub/daemon/blackcoin/GetNewAddressIT.java
CoinBub/daemon-interfaces
52389af9fc81f8e3a51f84122bde996199f38039
[ "MIT" ]
null
null
null
daemon-base/blackcoin-daemon/src/test/java/tech/coinbub/daemon/blackcoin/GetNewAddressIT.java
CoinBub/daemon-interfaces
52389af9fc81f8e3a51f84122bde996199f38039
[ "MIT" ]
null
null
null
35.318182
76
0.801802
7,467
package tech.coinbub.daemon.blackcoin; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import tech.coinbub.daemon.Blackcoin; import tech.coinbub.daemon.testutils.docker.Dockerized; import tech.coinbub.daemon.testutils.suites.StandardGetNewAddressIT; @ExtendWith(Dockerized.class) @DockerizedBlackcoin public class GetNewAddressIT implements StandardGetNewAddressIT<Blackcoin> { @Test @Override public void canGetAddressForDefaultAccount(final Blackcoin blackcoin) { final String address = blackcoin.getnewaddress(); assertThat(address.length(), is(equalTo(34))); } }
3e11b17ccbf67ee3568caa8308ca080bafe3dd4d
1,047
java
Java
src/main/java/com/training/springbootbuyitem/service/UserDetailsServiceImpl.java
Johnymiro/fullstack-spring-react-ecommerce
7d8bad8fabd849e784de87bb91beb0d1bb38dcf1
[ "Apache-2.0" ]
2
2021-08-09T08:24:44.000Z
2021-08-09T08:24:48.000Z
src/main/java/com/training/springbootbuyitem/service/UserDetailsServiceImpl.java
Johnymiro/fullstack-spring-react-ecommerce
7d8bad8fabd849e784de87bb91beb0d1bb38dcf1
[ "Apache-2.0" ]
null
null
null
src/main/java/com/training/springbootbuyitem/service/UserDetailsServiceImpl.java
Johnymiro/fullstack-spring-react-ecommerce
7d8bad8fabd849e784de87bb91beb0d1bb38dcf1
[ "Apache-2.0" ]
null
null
null
38.777778
111
0.810888
7,468
package com.training.springbootbuyitem.service; import com.training.springbootbuyitem.entity.model.User; import com.training.springbootbuyitem.repository.UserRepository; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.security.core.userdetails.UserDetails; import org.springframework.security.core.userdetails.UserDetailsService; import org.springframework.security.core.userdetails.UsernameNotFoundException; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @Service public class UserDetailsServiceImpl implements UserDetailsService { @Autowired UserRepository userRepository; @Override @Transactional public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException { User user = userRepository.findByUsername(username) .orElseThrow(() -> new UsernameNotFoundException("User Not Found with username: " + username)); return UserDetailsImpl.build(user); } }
3e11b25e834ff1ee12fb3a0a0f6df606b21698bb
1,109
java
Java
src/main/java/com/tct/codec/AuthCodeMessageCodec.java
bug123luo/gunLocationMqServer
c0eb690c551aef6e6b360a2b77c2b0b0128bc157
[ "Apache-2.0" ]
null
null
null
src/main/java/com/tct/codec/AuthCodeMessageCodec.java
bug123luo/gunLocationMqServer
c0eb690c551aef6e6b360a2b77c2b0b0128bc157
[ "Apache-2.0" ]
null
null
null
src/main/java/com/tct/codec/AuthCodeMessageCodec.java
bug123luo/gunLocationMqServer
c0eb690c551aef6e6b360a2b77c2b0b0128bc157
[ "Apache-2.0" ]
null
null
null
33.606061
91
0.797115
7,469
package com.tct.codec; import com.alibaba.fastjson.JSONObject; import com.tct.codec.pojo.AuthCodeMessage; import com.tct.codec.pojo.AuthCodeMessageBody; public class AuthCodeMessageCodec implements MessageCodec{ @Override public Object decode(String inMsg) throws Exception { JSONObject json= JSONObject.parseObject(inMsg); AuthCodeMessage authCodeMessage = new AuthCodeMessage(); authCodeMessage.setServiceType(json.getString("serviceType")); authCodeMessage.setFormatVersion(json.getString("formatVersion")); authCodeMessage.setDeviceType(json.getInteger("deviceType")); authCodeMessage.setSerialNumber(json.getString("serialNumber")); authCodeMessage.setMessageType(json.getString("messageType")); authCodeMessage.setSendTime(json.getString("sendTime")); authCodeMessage.setMessageBody(json.getObject("messageBody", AuthCodeMessageBody.class)); //authCodeMessage.setMessageBody((AuthCodeMessageBody)json.get("messageBody")); return authCodeMessage; } public String encode(Object outMsg) throws Exception { // TODO Auto-generated method stub return null; } }
3e11b32fa21e67ab29bb83a3e06b4b5ed43bf4e5
1,154
java
Java
art-extension/opttests/src/OptimizationTests/ShortLeafMethodsInlining/InvokeVirtual_const_001/Main.java
pramulkant/https-github.com-android-art-intel-marshmallow
87e8c22f248164780b92aaa0cdea14bf6cda3859
[ "Apache-2.0" ]
8
2016-08-11T09:46:36.000Z
2018-03-02T17:28:35.000Z
art-extension/opttests/src/OptimizationTests/ShortLeafMethodsInlining/InvokeVirtual_const_001/Main.java
pramulkant/https-github.com-android-art-intel-marshmallow
87e8c22f248164780b92aaa0cdea14bf6cda3859
[ "Apache-2.0" ]
null
null
null
art-extension/opttests/src/OptimizationTests/ShortLeafMethodsInlining/InvokeVirtual_const_001/Main.java
pramulkant/https-github.com-android-art-intel-marshmallow
87e8c22f248164780b92aaa0cdea14bf6cda3859
[ "Apache-2.0" ]
null
null
null
33.941176
93
0.674177
7,471
/*
 * Copyright (C) 2015 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package OptimizationTests.ShortLeafMethodsInlining.InvokeVirtual_const_001;

/**
 * Inlining test driver: repeatedly folds the value returned by the short
 * leaf method {@code Test.simple_method()} into a char accumulator and
 * prints the accumulator before and after the loop.
 */
class Main {
    final static int iterations = 10;

    public static void main(String[] args) {
        final Test callee = new Test();
        char workJ = '\u0000';
        System.out.println("Initial workJ value is " + String.format("\\u%04x", (int) workJ));
        int iteration = 0;
        while (iteration < iterations) {
            workJ = (char) (callee.simple_method() + workJ);
            iteration++;
        }
        System.out.println("Final workJ value is " + String.format("\\u%04x", (int) workJ));
    }
}
3e11b33da96483133d99f48cc350ec7600eba132
2,457
java
Java
src/main/java/application/model/JavaColumn.java
878102433/-JavaBeanUtil
0064f6899adc5d3cd6b69aa7f3d89efe20d0bd8d
[ "Apache-2.0" ]
1
2018-10-09T05:04:24.000Z
2018-10-09T05:04:24.000Z
src/main/java/application/model/JavaColumn.java
878102433/-JavaBeanUtil
0064f6899adc5d3cd6b69aa7f3d89efe20d0bd8d
[ "Apache-2.0" ]
null
null
null
src/main/java/application/model/JavaColumn.java
878102433/-JavaBeanUtil
0064f6899adc5d3cd6b69aa7f3d89efe20d0bd8d
[ "Apache-2.0" ]
null
null
null
26.138298
69
0.750509
7,472
package application.model;

import java.util.ArrayList;
import java.util.List;

import javax.xml.bind.annotation.XmlElement;

/**
 * JAXB-mapped description of a single bean property ("column"): its name,
 * its Java data type, the imports it requires, and the annotation source
 * lines to emit on its getter and setter.
 *
 * <p>{@link #clone()} produces a deep copy: the three name lists are copied
 * so the clone can be mutated independently of the original.
 */
public class JavaColumn implements Cloneable {

    private String columnName;
    private String dataType;
    private List<String> importNames;
    private List<String> annotationGetNames;
    private List<String> annotationSetNames;

    /**
     * Deep copy of this column. Strings are immutable, so copying each list
     * with the {@link ArrayList} copy constructor (instead of the original's
     * element-by-element loops) yields an equivalent deep copy.
     *
     * @return an independent copy, or {@code null} if cloning fails — which
     *         cannot happen in practice, since this class implements
     *         {@link Cloneable}; the null return preserves the historical
     *         contract of the original implementation
     */
    @Override
    public JavaColumn clone() {
        try {
            JavaColumn copy = (JavaColumn) super.clone();
            // super.clone() copied the list *references*; replace them with
            // fresh lists (null stays null, matching the original behavior).
            copy.setImportNames(copyOf(importNames));
            copy.setAnnotationGetNames(copyOf(annotationGetNames));
            copy.setAnnotationSetNames(copyOf(annotationSetNames));
            return copy;
        } catch (CloneNotSupportedException e) {
            // Unreachable: JavaColumn implements Cloneable.
            e.printStackTrace();
            return null;
        }
    }

    /** Returns a new list with the same elements, or {@code null} for {@code null} input. */
    private static List<String> copyOf(List<String> source) {
        return source == null ? null : new ArrayList<>(source);
    }

    public String getColumnName() {
        return columnName;
    }

    public void setColumnName(String columnName) {
        this.columnName = columnName;
    }

    public String getDataType() {
        return dataType;
    }

    public void setDataType(String dataType) {
        this.dataType = dataType;
    }

    /** Imports required by this column's type and annotations. */
    @XmlElement(name = "importName")
    public List<String> getImportNames() {
        return importNames;
    }

    public void setImportNames(List<String> importNames) {
        this.importNames = importNames;
    }

    /** Annotation lines to emit on the getter. */
    @XmlElement(name = "annotationGetName")
    public List<String> getAnnotationGetNames() {
        return annotationGetNames;
    }

    public void setAnnotationGetNames(List<String> annotationGetNames) {
        this.annotationGetNames = annotationGetNames;
    }

    /** Annotation lines to emit on the setter. */
    @XmlElement(name = "annotationSetName")
    public List<String> getAnnotationSetNames() {
        return annotationSetNames;
    }

    public void setAnnotationSetNames(List<String> annotationSetNames) {
        this.annotationSetNames = annotationSetNames;
    }
}
3e11b3414778b6235ca7a81a897143972479f477
8,032
java
Java
amasel-mws-lib/src/main/java/co/amasel/model/reports/GetReportListResponse.java
zaro/mws-rest-api
7f6cd3e8e911c6494001f0b620b6f6be07e8ac0a
[ "MIT" ]
2
2019-01-15T16:08:33.000Z
2020-05-01T08:29:51.000Z
amasel-mws-lib/src/main/java/co/amasel/model/reports/GetReportListResponse.java
zaro/mws-rest-api
7f6cd3e8e911c6494001f0b620b6f6be07e8ac0a
[ "MIT" ]
null
null
null
amasel-mws-lib/src/main/java/co/amasel/model/reports/GetReportListResponse.java
zaro/mws-rest-api
7f6cd3e8e911c6494001f0b620b6f6be07e8ac0a
[ "MIT" ]
null
null
null
24.713846
92
0.676793
7,473
package co.amasel.model.reports; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlType; import co.amasel.model.common.AmaselMwsObject; import co.amasel.model.common.ResponseHeaderMetadata; import co.amasel.model.common.ResponseMetadata; import com.amazonservices.mws.client.MwsReader; import com.amazonservices.mws.client.MwsWriter; import com.amazonservices.mws.client.MwsResponseHeaderMetadata; /** * <p> * Java class for anonymous complex type. * * <p> * The following schema fragment specifies the expected content contained within * this class. * * <pre> * &lt;complexType> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element ref="{http://mws.amazonaws.com/doc/2009-01-01/}GetReportListResult"/> * &lt;element ref="{http://mws.amazonaws.com/doc/2009-01-01/}ResponseMetadata"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * Generated by AWS Code Generator * <p/> * Wed Feb 18 13:28:59 PST 2009 * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "", propOrder = {"getReportListResult", "responseMetadata"}) @XmlRootElement(name = "GetReportListResponse") public class GetReportListResponse extends AmaselMwsObject { @XmlElement(name = "GetReportListResult", required = true) protected GetReportListResult getReportListResult; @XmlElement(name = "ResponseMetadata", required = true) protected ResponseMetadata responseMetadata; protected MwsResponseHeaderMetadata mwsResponseHeaderMetadata; /** * Default constructor * */ public GetReportListResponse() { super(); } /** * Value constructor * */ public GetReportListResponse(final GetReportListResult getReportListResult, final ResponseMetadata responseMetadata) { this.getReportListResult = getReportListResult; this.responseMetadata = 
responseMetadata; } /** * Gets the value of the getReportListResult property. * * @return possible object is {@link GetReportListResult } * */ public GetReportListResult getGetReportListResult() { return getReportListResult; } /** * Sets the value of the getReportListResult property. * * @param value * allowed object is {@link GetReportListResult } * */ public void setGetReportListResult(GetReportListResult value) { this.getReportListResult = value; } public boolean isSetGetReportListResult() { return (this.getReportListResult != null); } /** * Gets the value of the responseMetadata property. * * @return possible object is {@link ResponseMetadata } * */ public ResponseMetadata getResponseMetadata() { return responseMetadata; } /** * Sets the value of the responseMetadata property. * * @param value * allowed object is {@link ResponseMetadata } * */ public void setResponseMetadata(ResponseMetadata value) { this.responseMetadata = value; } public boolean isSetResponseMetadata() { return (this.responseMetadata != null); } /** * Sets the value of the GetReportListResult property. * * @param value * @return this instance */ public GetReportListResponse withGetReportListResult( GetReportListResult value) { setGetReportListResult(value); return this; } /** * Sets the value of the ResponseMetadata property. 
* * @param value * @return this instance */ public GetReportListResponse withResponseMetadata(ResponseMetadata value) { setResponseMetadata(value); return this; } @javax.xml.bind.annotation.XmlTransient private ResponseHeaderMetadata responseHeaderMetadata; public boolean isSetResponseHeaderMetadata() { return this.responseHeaderMetadata != null; } public void setResponseHeaderMetadata( ResponseHeaderMetadata responseHeaderMetadata) { this.responseHeaderMetadata = responseHeaderMetadata; } public ResponseHeaderMetadata getResponseHeaderMetadata() { return responseHeaderMetadata; } /** * * XML string representation of this object * * @return XML String */ public String toXML() { StringBuffer xml = new StringBuffer(); xml.append("<GetReportListResponse xmlns=\"http://mws.amazonaws.com/doc/2009-01-01/\">"); if (isSetGetReportListResult()) { GetReportListResult getReportListResult = getGetReportListResult(); xml.append("<GetReportListResult>"); xml.append(getReportListResult.toXMLFragment()); xml.append("</GetReportListResult>"); } if (isSetResponseMetadata()) { ResponseMetadata responseMetadata = getResponseMetadata(); xml.append("<ResponseMetadata>"); xml.append(responseMetadata.toXMLFragment()); xml.append("</ResponseMetadata>"); } xml.append("</GetReportListResponse>"); return xml.toString(); } /** * * Escape XML special characters */ private String escapeXML(String string) { StringBuffer sb = new StringBuffer(); int length = string.length(); for (int i = 0; i < length; ++i) { char c = string.charAt(i); switch (c) { case '&' : sb.append("&amp;"); break; case '<' : sb.append("&lt;"); break; case '>' : sb.append("&gt;"); break; case '\'' : sb.append("&#039;"); break; case '"' : sb.append("&quot;"); break; default : sb.append(c); } } return sb.toString(); } /** * * JSON string representation of this object * * @return JSON String */ public String toJSON() { StringBuffer json = new StringBuffer(); json.append("{\"GetReportListResponse\" : {"); 
json.append(quoteJSON("@xmlns")); json.append(" : "); json.append(quoteJSON("http://mws.amazonaws.com/doc/2009-01-01/")); boolean first = true; json.append(", "); if (isSetGetReportListResult()) { if (!first) json.append(", "); json.append("\"GetReportListResult\" : {"); GetReportListResult getReportListResult = getGetReportListResult(); json.append(getReportListResult.toJSONFragment()); json.append("}"); first = false; } if (isSetResponseMetadata()) { if (!first) json.append(", "); json.append("\"ResponseMetadata\" : {"); ResponseMetadata responseMetadata = getResponseMetadata(); json.append(responseMetadata.toJSONFragment()); json.append("}"); first = false; } json.append("}"); json.append("}"); return json.toString(); } /** * * Quote JSON string */ private String quoteJSON(String string) { StringBuffer sb = new StringBuffer(); sb.append("\""); int length = string.length(); for (int i = 0; i < length; ++i) { char c = string.charAt(i); switch (c) { case '"' : sb.append("\\\""); break; case '\\' : sb.append("\\\\"); break; case '/' : sb.append("\\/"); break; case '\b' : sb.append("\\b"); break; case '\f' : sb.append("\\f"); break; case '\n' : sb.append("\\n"); break; case '\r' : sb.append("\\r"); break; case '\t' : sb.append("\\t"); break; default : if (c < ' ') { sb.append("\\u" + String.format("%03x", Integer.valueOf(c))); } else { sb.append(c); } } } sb.append("\""); return sb.toString(); } public void readFragmentFrom(MwsReader reader) { getReportListResult = reader.read("GetReportListResult", GetReportListResult.class); responseMetadata = reader.read("ResponseMetadata", ResponseMetadata.class); } public void writeFragmentTo(MwsWriter writer) { writer.write("GetReportListResult", getReportListResult); writer.write("ResponseMetadata", responseMetadata); } public MwsResponseHeaderMetadata getMwsHeaderMetadata() { return this.mwsResponseHeaderMetadata; } public void setMwsHeaderMetadata(MwsResponseHeaderMetadata hmd) { this.mwsResponseHeaderMetadata = hmd; } }
3e11b34b4e3ad601678cfb6a009048c2bbd520d1
795
java
Java
mgenerate-core/src/test/java/uk/dioxic/mgenerate/core/operator/chrono/SecondTest.java
dioxic/mgenerate
b9c743f9a4f574e9e8d9deb28c05e9d867135446
[ "Apache-2.0" ]
7
2018-10-29T11:03:18.000Z
2022-03-23T14:03:29.000Z
mgenerate-core/src/test/java/uk/dioxic/mgenerate/core/operator/chrono/SecondTest.java
dioxic/mgenerate
b9c743f9a4f574e9e8d9deb28c05e9d867135446
[ "Apache-2.0" ]
4
2020-08-05T08:10:31.000Z
2021-04-23T05:53:36.000Z
mgenerate-core/src/test/java/uk/dioxic/mgenerate/core/operator/chrono/SecondTest.java
dioxic/mgenerate
b9c743f9a4f574e9e8d9deb28c05e9d867135446
[ "Apache-2.0" ]
3
2019-10-31T14:54:43.000Z
2021-03-19T14:48:32.000Z
26.5
95
0.70566
7,474
package uk.dioxic.mgenerate.core.operator.chrono;

import org.junit.jupiter.api.Test;
import uk.dioxic.mgenerate.core.transformer.ReflectiveTransformerRegistry;

import java.time.LocalDateTime;

import static org.assertj.core.api.Assertions.assertThat;

/** Unit tests for the {@code Second} operator. */
class SecondTest {

    // Reference timestamp captured when the test instance is created.
    private LocalDateTime referenceTime = LocalDateTime.now();

    @Test
    void resolve_Random() {
        // Built without an explicit date: the resolved value must still be a
        // valid second-of-minute.
        Second operatorUnderTest =
                new SecondBuilder(ReflectiveTransformerRegistry.getInstance()).build();
        assertThat(operatorUnderTest.resolveInternal()).isBetween(0, 59);
    }

    @Test
    void resolve_FromDate() {
        // Built with an explicit date: the resolved value must equal that
        // date's second field.
        SecondBuilder builder = new SecondBuilder(ReflectiveTransformerRegistry.getInstance());
        Second operatorUnderTest = builder.date(referenceTime).build();
        assertThat(operatorUnderTest.resolveInternal()).isEqualTo(referenceTime.getSecond());
    }
}
3e11b3516ab8397f0819ea3b36d25db28fe6fd73
532
java
Java
src/main/java/com/barpiotr/MyFirstMavenApp/userMenu/UserMenuController.java
JavaDVeloper/00_TEST_APP
b3b22bcbefb53427b82818a38c5b5259b1090c28
[ "CC-BY-3.0" ]
null
null
null
src/main/java/com/barpiotr/MyFirstMavenApp/userMenu/UserMenuController.java
JavaDVeloper/00_TEST_APP
b3b22bcbefb53427b82818a38c5b5259b1090c28
[ "CC-BY-3.0" ]
null
null
null
src/main/java/com/barpiotr/MyFirstMavenApp/userMenu/UserMenuController.java
JavaDVeloper/00_TEST_APP
b3b22bcbefb53427b82818a38c5b5259b1090c28
[ "CC-BY-3.0" ]
null
null
null
22.166667
77
0.753759
7,475
package com.barpiotr.MyFirstMavenApp.userMenu;

import java.util.Scanner;

/**
 * Console controller for the user menu: fetches all menu entries from the
 * DAO (model), hands them to the console list view for rendering, then
 * blocks on standard input until the user presses enter.
 */
public class UserMenuController {

    private UserMenuConsoleListView view;
    private UserMenuDAO model;

    // Reads the "press enter" confirmation from standard input.
    Scanner input = new Scanner(System.in);

    /**
     * @param view  console view that renders the menu entry list
     * @param model DAO supplying the menu entries
     */
    public UserMenuController(UserMenuConsoleListView view, UserMenuDAO model) {
        this.view = view;
        this.model = model;
    }

    /**
     * Loads all menu entries into the view, displays them, and waits for the
     * user to press enter before returning.
     */
    public void display() {
        view.setUserMenuList(model.getAllUsersMenu());
        view.display();
        System.out.println("\nPress enter return to the main menu");
        // Blocks until the user presses enter.
        this.input.nextLine();
    }
}
3e11b36a4759f3ee1a006786d5a32f9c2fe85e67
1,586
java
Java
integration-tests/src/test/java/eu/tsystems/mms/tic/testframework/test/pagefactory/PageOptionsTest.java
amikhalc/testerra
1dcdeb01874bc41524afe1fc995faf1fab0ddd62
[ "Apache-2.0" ]
19
2020-06-09T06:21:58.000Z
2022-02-23T08:02:22.000Z
integration-tests/src/test/java/eu/tsystems/mms/tic/testframework/test/pagefactory/PageOptionsTest.java
amikhalc/testerra
1dcdeb01874bc41524afe1fc995faf1fab0ddd62
[ "Apache-2.0" ]
103
2021-03-16T09:37:04.000Z
2022-03-31T11:07:55.000Z
integration-tests/src/test/java/eu/tsystems/mms/tic/testframework/test/pagefactory/PageOptionsTest.java
amikhalc/testerra
1dcdeb01874bc41524afe1fc995faf1fab0ddd62
[ "Apache-2.0" ]
12
2020-06-09T10:23:53.000Z
2022-03-28T10:53:22.000Z
37.761905
115
0.75599
7,476
/*
 * Testerra
 *
 * (C) 2020, Eric Kubenka, T-Systems Multimedia Solutions GmbH, Deutsche Telekom AG
 *
 * Deutsche Telekom AG and all other contributors /
 * copyright owners license this file to you under the Apache
 * License, Version 2.0 (the "License"); you may not use this
 * file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */
package eu.tsystems.mms.tic.testframework.test.pagefactory;

import eu.tsystems.mms.tic.testframework.AbstractTestSitesTest;
import eu.tsystems.mms.tic.testframework.core.pageobjects.testdata.PageWithPageOptions;
import eu.tsystems.mms.tic.testframework.pageobjects.factory.PageFactory;
import eu.tsystems.mms.tic.testframework.webdrivermanager.WebDriverManager;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * Integration test for page options: creating a page object whose class
 * declares page options must propagate the configured element timeout to
 * the page's elements.
 */
public class PageOptionsTest extends AbstractTestSitesTest {

    @Test
    public void testT01_PageOptions_ElementTimeout() {
        // Page object under test, bound to the current WebDriver session.
        PageWithPageOptions page = PageFactory.create(PageWithPageOptions.class, WebDriverManager.getWebDriver());
        // PageWithPageOptions is expected to configure a 3 second element
        // timeout via its page options (see that class's declaration).
        Assert.assertEquals(page.existingElement.getTimeoutInSeconds(), 3, "Timeout value from page options");
    }
}
3e11b3e07c9d7624d7417157d404688ade427c2c
1,027
java
Java
gmall-sms/src/main/java/com/atguigu/gmall/sms/entity/HomeSubjectSpuEntity.java
linyu902/gmall
83d9e1d232ed94916c482064fa2caee35865fe84
[ "Apache-2.0" ]
null
null
null
gmall-sms/src/main/java/com/atguigu/gmall/sms/entity/HomeSubjectSpuEntity.java
linyu902/gmall
83d9e1d232ed94916c482064fa2caee35865fe84
[ "Apache-2.0" ]
10
2020-02-28T01:14:28.000Z
2022-03-31T21:57:35.000Z
gmall-sms/src/main/java/com/atguigu/gmall/sms/entity/HomeSubjectSpuEntity.java
linyu902/gmall
83d9e1d232ed94916c482064fa2caee35865fe84
[ "Apache-2.0" ]
2
2020-01-06T00:33:18.000Z
2020-08-12T09:27:13.000Z
19.807692
59
0.704854
7,477
package com.atguigu.gmall.sms.entity;

import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;

import java.io.Serializable;
import java.util.Date;

import lombok.Data;

/**
 * Subject product mapping entity (original javadoc: 专题商品), backed by the
 * {@code sms_home_subject_spu} table: links an SPU to a home-page subject
 * with a display order. Getters/setters are generated by Lombok's
 * {@code @Data}. Swagger annotation values keep the original Chinese UI
 * strings; javadoc below is their English translation.
 *
 * <p>NOTE(review): the {@code java.util.Date} import appears unused here.
 *
 * @author linyu902
 * @email anpch@example.com
 * @date 2019-12-02 10:42:39
 */
@ApiModel
@Data
@TableName("sms_home_subject_spu")
public class HomeSubjectSpuEntity implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Primary key. */
    @TableId
    @ApiModelProperty(name = "id", value = "id")
    private Long id;

    /** Subject name (专题名字). */
    @ApiModelProperty(name = "name", value = "专题名字")
    private String name;

    /** Subject id (专题id). */
    @ApiModelProperty(name = "subjectId", value = "专题id")
    private Long subjectId;

    /** SPU id. */
    @ApiModelProperty(name = "spuId", value = "spu_id")
    private Long spuId;

    /** Display sort order (排序). */
    @ApiModelProperty(name = "sort", value = "排序")
    private Integer sort;
}
3e11b585f0b6ca57883508843940e35af1484bcb
3,101
java
Java
atlas-lib/src/main/java/es/redmic/atlaslib/dto/layer/LogoURLDTO.java
redmic-project/server-atlas
faee34e5afea52c526626f2e23cc0490bbee1562
[ "Apache-2.0" ]
null
null
null
atlas-lib/src/main/java/es/redmic/atlaslib/dto/layer/LogoURLDTO.java
redmic-project/server-atlas
faee34e5afea52c526626f2e23cc0490bbee1562
[ "Apache-2.0" ]
null
null
null
atlas-lib/src/main/java/es/redmic/atlaslib/dto/layer/LogoURLDTO.java
redmic-project/server-atlas
faee34e5afea52c526626f2e23cc0490bbee1562
[ "Apache-2.0" ]
null
null
null
25.008065
108
0.676233
7,478
package es.redmic.atlaslib.dto.layer;

/*-
 * #%L
 * Atlas-lib
 * %%
 * Copyright (C) 2019 REDMIC Project / Server
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import org.apache.avro.Schema;

import com.fasterxml.jackson.annotation.JsonIgnore;

/**
 * DTO holding a logo's MIME format and its online resource URL. Implements
 * Avro's {@code SpecificRecord} against the hand-written {@code SCHEMA$}
 * below so it can be serialized/deserialized by Avro directly.
 */
public class LogoURLDTO extends org.apache.avro.specific.SpecificRecordBase
        implements org.apache.avro.specific.SpecificRecord {

    // @formatter:off
    // Hand-written Avro schema. Field order here fixes the indices used by
    // get(int)/put(int, Object) below: 0 = format, 1 = onlineResource.
    @JsonIgnore
    public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"LogoURLDTO\",\"namespace\":\"es.redmic.atlaslib.dto.layer\",\"fields\":["
                + "{\"name\":\"format\",\"type\":[\"string\", \"null\"]},"
                + "{\"name\":\"onlineResource\",\"type\":[\"string\", \"null\"]}]}");
    // @formatter:on

    // MIME type of the logo image (nullable per schema).
    private String format;

    // URL of the logo image (nullable per schema).
    private String onlineResource;

    public String getFormat() {
        return format;
    }

    public void setFormat(String format) {
        this.format = format;
    }

    public String getOnlineResource() {
        return onlineResource;
    }

    public void setOnlineResource(String onlineResource) {
        this.onlineResource = onlineResource;
    }

    // hashCode/equals extend the superclass (Avro record) implementations
    // with the two local fields.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = super.hashCode();
        result = prime * result + ((format == null) ? 0 : format.hashCode());
        result = prime * result + ((onlineResource == null) ? 0 : onlineResource.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!super.equals(obj))
            return false;
        if (getClass() != obj.getClass())
            return false;
        LogoURLDTO other = (LogoURLDTO) obj;
        if (format == null) {
            if (other.format != null)
                return false;
        } else if (!format.equals(other.format))
            return false;
        if (onlineResource == null) {
            if (other.onlineResource != null)
                return false;
        } else if (!onlineResource.equals(other.onlineResource))
            return false;
        return true;
    }

    @JsonIgnore
    @Override
    public Schema getSchema() {
        return SCHEMA$;
    }

    /** Avro positional getter; indices follow SCHEMA$ field order. */
    @JsonIgnore
    @Override
    public java.lang.Object get(int field$) {
        switch (field$) {
        case 0:
            return format;
        case 1:
            return onlineResource;
        default:
            throw new org.apache.avro.AvroRuntimeException("Bad index");
        }
    }

    /** Avro positional setter; Utf8/CharSequence values are normalized to String. */
    @JsonIgnore
    @Override
    public void put(int field$, java.lang.Object value$) {
        switch (field$) {
        case 0:
            format = value$ != null ? value$.toString() : null;
            break;
        case 1:
            onlineResource = value$ != null ? value$.toString() : null;
            break;
        default:
            throw new org.apache.avro.AvroRuntimeException("Bad index");
        }
    }
}
3e11b7559fabc9c41f5f385ccb13f2d1b44e8c23
2,553
java
Java
plugin/src/main/java/ru/beykerykt/lightapi/server/ServerModManager.java
Qveshn/LightAPI
1f284fa30086a090eb4be71141bf8551e5a3a7ac
[ "MIT" ]
27
2018-01-16T11:48:15.000Z
2021-08-30T11:28:33.000Z
plugin/src/main/java/ru/beykerykt/lightapi/server/ServerModManager.java
Qveshn/LightAPI
1f284fa30086a090eb4be71141bf8551e5a3a7ac
[ "MIT" ]
44
2018-01-19T12:50:01.000Z
2022-03-11T23:29:52.000Z
plugin/src/main/java/ru/beykerykt/lightapi/server/ServerModManager.java
Qveshn/LightAPI
1f284fa30086a090eb4be71141bf8551e5a3a7ac
[ "MIT" ]
32
2018-01-16T09:09:51.000Z
2021-08-07T20:32:36.000Z
33.181818
104
0.754599
7,479
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2017 Vladimir Mikhailov <efpyi@example.com>
 * Copyright (c) 2021 Qveshn
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
package ru.beykerykt.lightapi.server;

import ru.beykerykt.lightapi.server.nms.INMSHandler;
import ru.beykerykt.lightapi.utils.Utils;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Registry of server-mod support: maps a mod name to the
 * {@link ServerModInfo} describing which {@link INMSHandler} implementation
 * to use per server version, and holds the single active handler instance.
 *
 * <p>NOTE: the misspelled method names {@code findImplementaion} /
 * {@code initImplementaion} are part of the public API and are kept as-is.
 */
public class ServerModManager {

    // Registered mods keyed by name; concurrent map so plugins can register
    // from different threads.
    private static final Map<String, ServerModInfo> supportImpl = new ConcurrentHashMap<String, ServerModInfo>();

    // Active NMS handler; null until initImplementaion() succeeds.
    private static INMSHandler handler;

    /**
     * Looks up the handler class registered for the given mod and the
     * current server version.
     *
     * @param modName name the mod was registered under
     * @return the handler class, or {@code null} if the mod is unknown or
     *         the current server version is unsupported
     */
    public static Class<? extends INMSHandler> findImplementaion(String modName) {
        ServerModInfo impl = supportImpl.get(modName);
        return impl != null ? impl.getVersions().get(Utils.serverVersion()) : null;
    }

    /**
     * Instantiates the given handler class via its no-arg constructor and
     * makes it the active handler.
     *
     * @throws Exception if reflective instantiation fails
     */
    public static void initImplementaion(Class<? extends INMSHandler> clazz) throws Exception {
        ServerModManager.handler = clazz.getConstructor().newInstance();
    }

    /** Drops the active handler (idempotent). */
    public static void shutdown() {
        handler = null;
    }

    /** @return true once a handler has been initialized and not shut down */
    public static boolean isInitialized() {
        return handler != null;
    }

    /**
     * Registers a server mod.
     *
     * @return {@code true} if it was added; {@code false} if a mod with that
     *         name was already registered (atomic via putIfAbsent)
     */
    public static boolean registerServerMod(ServerModInfo info) {
        return supportImpl.putIfAbsent(info.getModName(), info) == null;
    }

    /**
     * Unregisters a server mod.
     *
     * <p>Bug fix: the original's containment check was inverted — it returned
     * {@code false} (and removed nothing) exactly when the mod WAS
     * registered, and claimed success for unknown names.
     *
     * @return {@code true} if the mod was registered and has been removed
     */
    public static boolean unregisterServerMod(String modName) {
        return supportImpl.remove(modName) != null;
    }

    /** @return the active handler, or {@code null} if not initialized */
    public static INMSHandler getNMSHandler() {
        return handler;
    }
}
3e11b84fd229c7252ade719d25f2079e9be4c4e7
3,579
java
Java
bt-core/src/main/java/bt/peer/lan/LocalServiceDiscoveryInfo.java
java-app-scans/bt
dcd0f41a5b2f5250b0b9af24dd0f1ccac23eed4d
[ "Apache-2.0" ]
2,199
2016-08-11T18:02:48.000Z
2022-03-31T11:02:54.000Z
bt-core/src/main/java/bt/peer/lan/LocalServiceDiscoveryInfo.java
java-app-scans/bt
dcd0f41a5b2f5250b0b9af24dd0f1ccac23eed4d
[ "Apache-2.0" ]
179
2016-09-01T22:42:54.000Z
2022-01-28T06:18:30.000Z
bt-core/src/main/java/bt/peer/lan/LocalServiceDiscoveryInfo.java
java-app-scans/bt
dcd0f41a5b2f5250b0b9af24dd0f1ccac23eed4d
[ "Apache-2.0" ]
380
2016-11-03T14:39:10.000Z
2022-03-26T08:31:48.000Z
36.520408
118
0.713328
7,480
/*
 * Copyright (c) 2016—2021 Andrei Tomashpolskiy and individual contributors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package bt.peer.lan;

import bt.net.InternetProtocolUtils;
import bt.net.SocketChannelConnectionAcceptor;

import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.ProtocolFamily;
import java.net.StandardProtocolFamily;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

import static bt.net.InternetProtocolUtils.isIP4;
import static bt.net.InternetProtocolUtils.isIP6;
import static java.util.Collections.unmodifiableCollection;
import static java.util.Collections.unmodifiableSet;

/**
 * Aggregates the local information needed for Local Service Discovery
 * announcements: the local listening ports, the announce groups whose IP
 * protocol family (v4/v6) matches at least one local acceptor, and the
 * network interfaces the acceptors are bound to. All exposed collections
 * are unmodifiable views built once in the constructor.
 */
public class LocalServiceDiscoveryInfo implements ILocalServiceDiscoveryInfo {

    private final Set<Integer> localPorts;
    private final Collection<AnnounceGroup> compatibleGroups;
    private final Collection<NetworkInterface> networkInterfaces;

    /**
     * @param socketAcceptors local connection acceptors (one per bound address)
     * @param announceGroups  candidate announce groups to filter for protocol
     *                        compatibility with the acceptors
     */
    public LocalServiceDiscoveryInfo(
            Set<SocketChannelConnectionAcceptor> socketAcceptors,
            Collection<AnnounceGroup> announceGroups) {
        this.localPorts = unmodifiableSet(collectLocalPorts(socketAcceptors));

        Collection<NetworkInterface> networkInterfaces = new HashSet<>();
        // Determine which protocol families we can accept on, scanning
        // acceptors until both families have been seen.
        boolean acceptIP4 = false;
        boolean acceptIP6 = false;
        for (SocketChannelConnectionAcceptor acceptor : socketAcceptors) {
            networkInterfaces.add(acceptor.getNetworkInterface());
            InetSocketAddress address = acceptor.getLocalAddress();
            ProtocolFamily protocolFamily = InternetProtocolUtils.getProtocolFamily(address.getAddress());
            // getProtocolFamily is assumed to return only INET or INET6,
            // hence the two-way branch.
            if (protocolFamily == StandardProtocolFamily.INET) {
                acceptIP4 = true;
            } else {
                acceptIP6 = true;
            }
            if (acceptIP4 && acceptIP6) {
                break; // no need to look further
            }
        }

        this.compatibleGroups = unmodifiableCollection(collectCompatibleGroups(announceGroups, acceptIP4, acceptIP6));
        this.networkInterfaces = unmodifiableCollection(networkInterfaces);
    }

    // Extracts the distinct local port numbers from the acceptors.
    private Set<Integer> collectLocalPorts(Set<SocketChannelConnectionAcceptor> socketAcceptors) {
        return socketAcceptors.stream().map(a -> a.getLocalAddress().getPort()).collect(Collectors.toSet());
    }

    // Keeps only groups whose address family matches a family we accept on.
    private Collection<AnnounceGroup> collectCompatibleGroups(
            Collection<AnnounceGroup> groups,
            boolean acceptIP4,
            boolean acceptIP6) {
        return groups.stream()
                .filter(group -> (isIP4(group.getAddress()) && acceptIP4)
                        || (isIP6(group.getAddress()) && acceptIP6))
                .collect(Collectors.toList());
    }

    @Override
    public Set<Integer> getLocalPorts() {
        return localPorts;
    }

    @Override
    public Collection<AnnounceGroup> getCompatibleGroups() {
        return compatibleGroups;
    }

    @Override
    public Collection<NetworkInterface> getNetworkInterfaces() {
        return networkInterfaces;
    }
}
3e11b9890fe8149b67e144b1d7498177e07b5e56
3,720
java
Java
ibm.jdk8/src/java/security/AlgorithmConstraints.java
flyzsd/java-code-snippets
1202b941ec4686d157fbc8643b65d247c6cd2b27
[ "MIT" ]
null
null
null
ibm.jdk8/src/java/security/AlgorithmConstraints.java
flyzsd/java-code-snippets
1202b941ec4686d157fbc8643b65d247c6cd2b27
[ "MIT" ]
null
null
null
ibm.jdk8/src/java/security/AlgorithmConstraints.java
flyzsd/java-code-snippets
1202b941ec4686d157fbc8643b65d247c6cd2b27
[ "MIT" ]
null
null
null
31
81
0.664785
7,481
/*===========================================================================
 * Licensed Materials - Property of IBM
 * "Restricted Materials of IBM"
 *
 * IBM SDK, Java(tm) Technology Edition, v8
 * (C) Copyright IBM Corp. 2010, 2010. All Rights Reserved
 *
 * US Government Users Restricted Rights - Use, duplication or disclosure
 * restricted by GSA ADP Schedule Contract with IBM Corp.
 *===========================================================================
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 */

package java.security;

import java.util.Set;

/**
 * This interface specifies constraints for cryptographic algorithms,
 * keys (key sizes), and other algorithm parameters.
 * <p>
 * {@code AlgorithmConstraints} objects are immutable. An implementation
 * of this interface should not provide methods that can change the state
 * of an instance once it has been created.
 * <p>
 * Note that {@code AlgorithmConstraints} can be used to represent the
 * restrictions described by the security properties
 * {@code jdk.certpath.disabledAlgorithms} and
 * {@code jdk.tls.disabledAlgorithms}, or could be used by a
 * concrete {@code PKIXCertPathChecker} to check whether a specified
 * certificate in the certification path contains the required algorithm
 * constraints.
 *
 * @see javax.net.ssl.SSLParameters#getAlgorithmConstraints
 * @see javax.net.ssl.SSLParameters#setAlgorithmConstraints(AlgorithmConstraints)
 *
 * @since 1.7
 */
public interface AlgorithmConstraints {

    /**
     * Determines whether an algorithm is granted permission for the
     * specified cryptographic primitives.
     *
     * @param primitives a set of cryptographic primitives
     * @param algorithm the algorithm name
     * @param parameters the algorithm parameters, or null if no additional
     *     parameters
     *
     * @return true if the algorithm is permitted and can be used for all
     *     of the specified cryptographic primitives
     *
     * @throws IllegalArgumentException if primitives or algorithm is null
     *     or empty
     */
    public boolean permits(Set<CryptoPrimitive> primitives,
            String algorithm, AlgorithmParameters parameters);

    /**
     * Determines whether a key is granted permission for the specified
     * cryptographic primitives.
     * <p>
     * This method is usually used to check key size and key usage.
     *
     * @param primitives a set of cryptographic primitives
     * @param key the key
     *
     * @return true if the key can be used for all of the specified
     *     cryptographic primitives
     *
     * @throws IllegalArgumentException if primitives is null or empty,
     *     or the key is null
     */
    public boolean permits(Set<CryptoPrimitive> primitives, Key key);

    /**
     * Determines whether an algorithm and the corresponding key are granted
     * permission for the specified cryptographic primitives.
     *
     * @param primitives a set of cryptographic primitives
     * @param algorithm the algorithm name
     * @param key the key
     * @param parameters the algorithm parameters, or null if no additional
     *     parameters
     *
     * @return true if the key and the algorithm can be used for all of the
     *     specified cryptographic primitives
     *
     * @throws IllegalArgumentException if primitives or algorithm is null
     *     or empty, or the key is null
     */
    public boolean permits(Set<CryptoPrimitive> primitives,
            String algorithm, Key key, AlgorithmParameters parameters);

}
3e11ba10bf585186d1a2a9958f5724ffce10d8f3
1,009
java
Java
Leetcode/0659. Split Array into Consecutive Subsequences/0659.java
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0659. Split Array into Consecutive Subsequences/0659.java
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0659. Split Array into Consecutive Subsequences/0659.java
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
31.53125
80
0.565907
7,482
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * LeetCode 659 — Split Array into Consecutive Subsequences.
 *
 * <p>Fix: the original file imported only {@code java.util.List} while also
 * using {@code Map}, {@code HashMap} and {@code ArrayList}, so it did not
 * compile. The algorithm itself is unchanged.
 */
class Solution {
  /**
   * Returns true if {@code nums} (sorted in non-decreasing order) can be split
   * into one or more consecutive subsequences, each of length at least 3.
   *
   * <p>Idea: for each distinct value v, {@code count(v) - count(v-1)} new
   * subsequences must <em>start</em> at v and {@code count(v) - count(v+1)}
   * subsequences must <em>end</em> at v. Pairing starts with ends in value
   * order, every pair must span at least 3 values.
   *
   * @param nums sorted input array
   * @return true if a valid split exists
   */
  public boolean isPossible(int[] nums) {
    Map<Integer, Integer> count = new HashMap<>();
    List<Integer> starts = new ArrayList<>(); // start values of subsequences
    List<Integer> ends = new ArrayList<>();   // end values of subsequences

    for (final int num : nums)
      count.merge(num, 1, Integer::sum);

    for (int i = 0; i < nums.length; ++i) {
      // Process each distinct value exactly once.
      if (i > 0 && nums[i] == nums[i - 1])
        continue;
      final int num = nums[i];
      final int currCount = count.get(num);
      final int prevCount = count.getOrDefault(num - 1, 0);
      final int nextCount = count.getOrDefault(num + 1, 0);
      // Subsequences that cannot be extended from num-1 must start here.
      for (int j = 0; j < currCount - prevCount; ++j)
        starts.add(num);
      // Subsequences that cannot continue into num+1 must end here.
      for (int j = 0; j < currCount - nextCount; ++j)
        ends.add(num);
    }

    // Each paired (start, end) must cover at least 3 consecutive values.
    for (int i = 0; i < starts.size(); ++i)
      if (ends.get(i) - starts.get(i) < 2)
        return false;

    return true;
  }
}
3e11ba4e7d0566da4c3c09dd14c1f8e23bc10fc3
961
java
Java
serialization/src/test/java/net/corda/serialization/internal/amqp/JavaSerialiseEnumTests.java
atoulme/corda
62fe4bc65aa525af2f1465bfbc86618da28c32d3
[ "Apache-2.0" ]
4,168
2016-11-28T11:59:46.000Z
2022-03-28T10:55:31.000Z
serialization/src/test/java/net/corda/serialization/internal/amqp/JavaSerialiseEnumTests.java
atoulme/corda
62fe4bc65aa525af2f1465bfbc86618da28c32d3
[ "Apache-2.0" ]
3,246
2016-11-30T10:50:42.000Z
2022-03-31T13:26:17.000Z
serialization/src/test/java/net/corda/serialization/internal/amqp/JavaSerialiseEnumTests.java
atoulme/corda
62fe4bc65aa525af2f1465bfbc86618da28c32d3
[ "Apache-2.0" ]
1,270
2016-11-30T09:44:44.000Z
2022-03-31T18:01:46.000Z
26.694444
97
0.705515
7,483
package net.corda.serialization.internal.amqp;

import net.corda.serialization.internal.amqp.testutils.TestSerializationContext;
import org.junit.Test;

import java.io.NotSerializableException;

import static net.corda.serialization.internal.amqp.testutils.AMQPTestUtilsKt.testDefaultFactory;

/**
 * Exercises AMQP serialization of a Java class whose single field is an enum
 * value, using the default serializer factory.
 */
public class JavaSerialiseEnumTests {

    /** Enum with several constants, used as the serialized payload. */
    public enum Bras {
        TSHIRT, UNDERWIRE, PUSHUP, BRALETTE, STRAPLESS, SPORTS, BACKLESS, PADDED
    }

    /** Minimal immutable holder whose only state is a {@link Bras} value. */
    private static class Bra {
        private final Bras bra;

        private Bra(Bras bra) {
            this.bra = bra;
        }

        public Bras getBra() {
            return this.bra;
        }
    }

    @Test
    public void testJavaConstructorAnnotations() throws NotSerializableException {
        final Bra source = new Bra(Bras.UNDERWIRE);
        final SerializationOutput output = new SerializationOutput(testDefaultFactory());
        output.serialize(source, TestSerializationContext.testSerializationContext);
    }
}
3e11ba872f748a845c1e427a5032bcb7f8e3442d
1,586
java
Java
modules/web/test/UITests/src/test/java/TestConstants.java
dnwick/editor
ce6da4a23dcdb8b81aab8ae21529140fbaf4790c
[ "Apache-2.0" ]
null
null
null
modules/web/test/UITests/src/test/java/TestConstants.java
dnwick/editor
ce6da4a23dcdb8b81aab8ae21529140fbaf4790c
[ "Apache-2.0" ]
null
null
null
modules/web/test/UITests/src/test/java/TestConstants.java
dnwick/editor
ce6da4a23dcdb8b81aab8ae21529140fbaf4790c
[ "Apache-2.0" ]
null
null
null
44.055556
123
0.691677
7,484
/*
 * Copyright (c) 2017, WSO2 Inc. (http://wso2.com) All Rights Reserved.
 * <p>
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**
 * Shared constants for the UI test suite: Selenium XPath locators, the
 * server endpoint under test, and names of test-resource folders.
 *
 * <p>Fix: added a private constructor — this is a pure constants holder and
 * was previously instantiable through the implicit public constructor.
 */
public class TestConstants {
    // XPATH locators (absolute paths into the editor's DOM; brittle by nature —
    // they must be kept in sync with the web UI's markup).
    public static final String WELCOME_PAGE_OPEN_BUTTON_XPATH = "/html/body/div[1]/div[2]/div/div[3]/div[2]/div/div/div" +
            "/div[2]/div[1]/div/div/div/div[1]/div[3]/button";
    public static final String FILE_OPEN_POPUP_LOCATION_INPUT_XPATH = "/html/body/div[11]/div/div/div[2]/div/form/div[1]" +
            "/div/input";
    public static final String FILE_OPEN_POPUP_LOCATION_OPEN_XPATH = "/html/body/div[11]/div/div/div[2]/div/form/div[3]" +
            "/div/button[1]";
    public static final String SVG_XPATH = "/html/body/div[1]/div[2]/div/div[3]/div[2]/div/div/div/div[2]/div[1]/div[2]" +
            "/div[1]/div[4]";

    // Base URL of the server the UI tests run against.
    public static final String SERVER_URL = "http://localhost:9091";
    // Test-resource folder names (resolved relative to the test resources root).
    public static final String BALLERINA_RESOURCE_FOLDER = "BallerinaSourceFiles";
    public static final String DOM_RESOURCE_FOLDER = "DOMFiles";

    /** Non-instantiable: constants holder only. */
    private TestConstants() {
    }
}
3e11bb4504dc4eee14c1984fec64bd95cb86ac8a
7,815
java
Java
application/src/main/java/com/sap/cloud/s4hana/examples/addressmgr/LocalAddressServlet.java
sbabuwc/cloud-s4-sdk-book
642b694501bfc21cbd73dbe3b18c83044c94a47b
[ "Apache-2.0" ]
1
2020-01-10T08:08:45.000Z
2020-01-10T08:08:45.000Z
application/src/main/java/com/sap/cloud/s4hana/examples/addressmgr/LocalAddressServlet.java
sbabuwc/cloud-s4-sdk-book
642b694501bfc21cbd73dbe3b18c83044c94a47b
[ "Apache-2.0" ]
null
null
null
application/src/main/java/com/sap/cloud/s4hana/examples/addressmgr/LocalAddressServlet.java
sbabuwc/cloud-s4-sdk-book
642b694501bfc21cbd73dbe3b18c83044c94a47b
[ "Apache-2.0" ]
null
null
null
47.652439
161
0.717338
7,485
package com.sap.cloud.s4hana.examples.addressmgr;

import com.google.gson.Gson;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.EnumUtils;
import org.slf4j.Logger;

import javax.persistence.EntityManager;
import javax.persistence.EntityTransaction;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.List;

import com.sap.cloud.s4hana.examples.addressmgr.commands.CreateAddressCommand;
import com.sap.cloud.s4hana.examples.addressmgr.datasource.MultitenantEntityManagerFacade;
import com.sap.cloud.s4hana.examples.addressmgr.models.Address;
import com.sap.cloud.s4hana.examples.addressmgr.models.Status;
import com.sap.cloud.s4hana.examples.addressmgr.util.HttpServlet;
import com.sap.cloud.s4hana.examples.addressmgr.views.AddressView;
import com.sap.cloud.s4hana.examples.addressmgr.views.ViewModelConverter;
import com.sap.cloud.sdk.cloudplatform.logging.CloudLoggerFactory;
import com.sap.cloud.sdk.s4hana.datamodel.odata.namespaces.businesspartner.BusinessPartnerAddress;
import com.sap.cloud.sdk.s4hana.datamodel.odata.services.DefaultBusinessPartnerService;

/**
 * REST endpoint for locally persisted addresses ("/api/addresses-local").
 *
 * <p>Supported operations:
 * <ul>
 *   <li>GET — list all local addresses, optionally filtered by {@code status};</li>
 *   <li>POST — create a new address with status NEW and return the full list;</li>
 *   <li>PATCH — approve or reject an existing address by id; approval also
 *       replicates the address to S/4HANA via {@link CreateAddressCommand}.</li>
 * </ul>
 *
 * <p>Persistence goes through a tenant-aware JPA {@link EntityManager} obtained
 * from {@link MultitenantEntityManagerFacade}; each request manages its own
 * transactions explicitly.
 */
@WebServlet("/api/addresses-local")
public class LocalAddressServlet extends HttpServlet {
    private static final long serialVersionUID = 1L;
    private static final Logger logger = CloudLoggerFactory.getLogger(LocalAddressServlet.class);

    /**
     * Lists local addresses as JSON.
     *
     * <p>Query parameter {@code status} is optional; when present it must be a
     * valid {@link Status} constant name, otherwise 400 is returned.
     */
    @Override
    protected void doGet(final HttpServletRequest request, final HttpServletResponse response) throws IOException {
        final String status = request.getParameter("status");
        final EntityManager entityManager = MultitenantEntityManagerFacade.getInstance().getEntityManager();
        final EntityTransaction transaction = entityManager.getTransaction();
        transaction.begin();
        // Retrieve all addresses
        if (status == null) {
            final List<Address> addresses = entityManager.createNamedQuery("Address.findAll", Address.class).getResultList();
            final List<AddressView> addressViews = ViewModelConverter.convertAddressesToAddressViews(addresses);
            response.setContentType("application/json");
            response.getWriter().write(new Gson().toJson(addressViews));
            // Retrieve addresses by status
        } else if (EnumUtils.isValidEnum(Status.class, status)) {
            final List<Address> addresses = entityManager.createNamedQuery("Address.findByStatus", Address.class).setParameter("status", status).getResultList();
            final List<AddressView> addressViews = ViewModelConverter.convertAddressesToAddressViews(addresses);
            response.setContentType("application/json");
            response.getWriter().write(new Gson().toJson(addressViews));
            // Fail: wrong status in HTTP request
        } else {
            logger.error("Wrong status value. Possible values: new, approved, rejected.");
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        }
        // NOTE(review): on the 400 path this still commits an (empty) read
        // transaction — harmless, but worth confirming intent.
        transaction.commit();
        entityManager.close();
    }

    /**
     * Creates a new local address from the JSON request body.
     *
     * <p>The new address is forced to status NEW regardless of what the client
     * sent; the response is 201 with the complete, updated address list.
     */
    @Override
    protected void doPost(final HttpServletRequest request, final HttpServletResponse resp) throws IOException {
        final AddressView addressView = getAddressFromBody(request);
        final Address address = ViewModelConverter.convertAddressViewToAddress(addressView);
        address.setStatus(Status.NEW.toString());
        logger.info("Received post request to create local address {}", address);
        final EntityManager entityManager = MultitenantEntityManagerFacade.getInstance().getEntityManager();
        final EntityTransaction transaction = entityManager.getTransaction();
        // Persist the address
        transaction.begin();
        entityManager.persist(address);
        transaction.commit();
        // Retrieve all current local addresses
        transaction.begin();
        final List<Address> addresses = entityManager.createNamedQuery("Address.findAll", Address.class).getResultList();
        transaction.commit();
        entityManager.close();
        final List<AddressView> addressViews = ViewModelConverter.convertAddressesToAddressViews(addresses);
        resp.setStatus(HttpServletResponse.SC_CREATED);
        resp.setContentType("application/json");
        resp.getWriter().write(new Gson().toJson(addressViews));
    }

    /** Deserializes the request body (expected UTF-8 JSON) into an {@link AddressView}. */
    private AddressView getAddressFromBody(final HttpServletRequest request) throws IOException {
        final String body = IOUtils.toString(request.getInputStream(), "utf-8");
        return new Gson().fromJson(body, AddressView.class);
    }

    /**
     * Approves or rejects an address.
     *
     * <p>Query parameters: {@code addressId} (numeric id of an existing local
     * address) and {@code status} (APPROVED or REJECTED). Returns 204 on
     * success; any malformed parameter or unknown id yields 400 (both are
     * funneled through {@link IllegalArgumentException}).
     */
    @Override
    protected void doPatch(final HttpServletRequest request, final HttpServletResponse response) throws IOException {
        try {
            // NumberFormatException and Status.valueOf's IllegalArgumentException
            // are both caught by the IllegalArgumentException handler below.
            final Long addressId = Long.parseLong(request.getParameter("addressId"));
            final Status status = Status.valueOf(request.getParameter("status"));
            final EntityManager entityManager = MultitenantEntityManagerFacade.getInstance().getEntityManager();
            final EntityTransaction transaction = entityManager.getTransaction();
            transaction.begin();
            final Address address = entityManager.find(Address.class, addressId);
            transaction.commit();
            if (address == null) {
                throw new IllegalArgumentException(String.format("Address with the id %d does not exist", addressId));
            } else {
                // NEW is deliberately not handled: a PATCH may only move an
                // address to APPROVED or REJECTED.
                switch (status) {
                    case APPROVED:
                        approveAddress(address, entityManager);
                        break;
                    case REJECTED:
                        rejectAddress(address, entityManager);
                        break;
                }
            }
            entityManager.close();
            response.setStatus(HttpServletResponse.SC_NO_CONTENT);
        } catch (IllegalArgumentException e) {
            logger.error("Wrong parameters in the HTTP request");
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        }
    }

    /**
     * Marks the address APPROVED locally, then creates the corresponding
     * business partner address in S/4HANA.
     *
     * <p>NOTE(review): the local commit happens before the remote create, so a
     * failing remote call leaves the address APPROVED locally but absent from
     * S/4HANA — confirm whether that is the intended consistency model.
     */
    private void approveAddress(final Address address, final EntityManager entityManager) {
        logger.info("Address approved - changing the status and creating in S/4HANA {}", address);
        final EntityTransaction transaction = entityManager.getTransaction();
        transaction.begin();
        address.setStatus(Status.APPROVED.toString());
        transaction.commit();
        BusinessPartnerAddress businessPartnerAddress = convertAddress(address);
        final DefaultBusinessPartnerService businessPartnerService = new DefaultBusinessPartnerService();
        new CreateAddressCommand(businessPartnerService, businessPartnerAddress).execute();
    }

    /** Copies the persisted entity's fields into an S/4HANA OData address object. */
    private BusinessPartnerAddress convertAddress(final Address address) {
        final BusinessPartnerAddress businessPartnerAddress = new BusinessPartnerAddress();
        businessPartnerAddress.setBusinessPartner(address.getBusinessPartner());
        businessPartnerAddress.setCityName(address.getCityName());
        businessPartnerAddress.setCountry(address.getCountry());
        businessPartnerAddress.setHouseNumber(address.getHouseNumber());
        businessPartnerAddress.setPostalCode(address.getPostalCode());
        businessPartnerAddress.setStreetName(address.getStreetName());
        return businessPartnerAddress;
    }

    /** Marks the address REJECTED locally; no remote replication takes place. */
    private void rejectAddress(final Address address, final EntityManager entityManager) {
        logger.info("Address rejected - changing the status {}", address);
        final EntityTransaction transaction = entityManager.getTransaction();
        transaction.begin();
        address.setStatus(Status.REJECTED.toString());
        transaction.commit();
    }
}
3e11bc31544e12b60085ed89a51d3f409264c14a
900
java
Java
Example Code/10 File IO/Java/Sample Input/SaveTextFile.java
tdfairch2/Course-Common-Files
09294bdae6da92011136b31207d053893801e299
[ "MIT" ]
null
null
null
Example Code/10 File IO/Java/Sample Input/SaveTextFile.java
tdfairch2/Course-Common-Files
09294bdae6da92011136b31207d053893801e299
[ "MIT" ]
null
null
null
Example Code/10 File IO/Java/Sample Input/SaveTextFile.java
tdfairch2/Course-Common-Files
09294bdae6da92011136b31207d053893801e299
[ "MIT" ]
null
null
null
25.714286
78
0.427778
7,486
import java.io.*;

/**
 * Reads "alice.txt" from the working directory and echoes every line to
 * standard output, prefixed with its 0-based line number and a tab.
 *
 * <p>Fixes:
 * <ul>
 *   <li>the original only declared {@code main()} with no parameters, so the
 *       class could not be launched via {@code java SaveTextFile}; a standard
 *       {@code main(String[])} entry point now delegates to it;</li>
 *   <li>the reader was leaked when {@code readLine()} threw — it is now
 *       closed on every path via try-with-resources.</li>
 * </ul>
 */
public class SaveTextFile {

    /**
     * Conventional JVM entry point; command-line arguments are ignored.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        main();
    }

    /** Original no-arg entry point, kept for backward compatibility. */
    public static void main() {
        String filename = "alice.txt";
        // try-with-resources guarantees the reader is closed even if
        // readLine() throws mid-file.
        try (BufferedReader br = new BufferedReader(new FileReader(filename))) {
            String line;
            int lineCount = 0;
            while ((line = br.readLine()) != null) {
                System.out.println(lineCount + "\t" + line);
                lineCount++;
            }
        } catch (FileNotFoundException ex) {
            System.out.println("Error opening file " + filename);
        } catch (IOException ex) {
            System.out.println("Error reading file " + filename);
        }
    }
}
3e11be22fdf4fd0db68b24370637d3abfaac72d1
2,692
java
Java
telemetry_examples/src/main/java/com/newrelic/telemetry/examples/SpanExample.java
zuluecho9/newrelic-telemetry-sdk-java
6264bf7f9147d7bff7d112f442b45c80902cffb1
[ "Apache-2.0" ]
26
2019-09-01T23:13:57.000Z
2022-03-01T21:46:47.000Z
telemetry_examples/src/main/java/com/newrelic/telemetry/examples/SpanExample.java
zuluecho9/newrelic-telemetry-sdk-java
6264bf7f9147d7bff7d112f442b45c80902cffb1
[ "Apache-2.0" ]
148
2019-08-27T22:14:37.000Z
2022-01-31T23:01:51.000Z
telemetry_examples/src/main/java/com/newrelic/telemetry/examples/SpanExample.java
zuluecho9/newrelic-telemetry-sdk-java
6264bf7f9147d7bff7d112f442b45c80902cffb1
[ "Apache-2.0" ]
31
2019-09-14T05:12:47.000Z
2022-01-30T09:40:07.000Z
34.512821
98
0.693536
7,487
/*
 * Copyright 2020 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */
package com.newrelic.telemetry.examples;

import com.newrelic.telemetry.Attributes;
import com.newrelic.telemetry.OkHttpPoster;
import com.newrelic.telemetry.SpanBatchSenderFactory;
import com.newrelic.telemetry.spans.Span;
import com.newrelic.telemetry.spans.SpanBatch;
import com.newrelic.telemetry.spans.SpanBatchSender;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Demonstrates sending a batch of spans to New Relic.
 *
 * <p>A {@link SpanBatchSender} is built on the OkHttp reference transport using the License key
 * supplied as the first command-line argument. Four spans (apples, oranges, beer, wine) are chained
 * parent-to-child under a single trace id and shipped with {@code sendBatch}.
 *
 * <p>Run with the License Key as the only command-line argument.
 */
public class SpanExample {
  private static final Logger logger = LoggerFactory.getLogger(SpanExample.class);
  private static final ThreadLocalRandom random = ThreadLocalRandom.current();
  private static final List<String> items = Arrays.asList("apples", "oranges", "beer", "wine");

  public static void main(String[] args) throws Exception {
    logger.info("Starting the SpanExample");
    final String licenseKey = args[0];

    final SpanBatchSender sender =
        SpanBatchSender.create(
            SpanBatchSenderFactory.fromHttpImplementation(OkHttpPoster::new)
                .configureWith(licenseKey)
                .useLicenseKey(true)
                .auditLoggingEnabled(true)
                .build());

    final String traceId = UUID.randomUUID().toString();
    final List<Span> spans = new ArrayList<>();

    // Each span starts where the previous one ended and is parented to it,
    // forming one linear chain within the trace.
    long startMillis = System.currentTimeMillis();
    String previousSpanId = null;
    for (String item : items) {
      final int durationMs = random.nextInt(1000);
      final String spanId = UUID.randomUUID().toString();
      spans.add(
          Span.builder(spanId)
              .traceId(traceId)
              .name(item)
              .parentId(previousSpanId)
              .durationMs(durationMs)
              .timestamp(startMillis)
              .serviceName("Telemetry SDK Span Example (" + item + ")")
              .build());
      startMillis += durationMs;
      previousSpanId = spanId;
    }

    sender.sendBatch(new SpanBatch(spans, getCommonAttributes(), traceId));
  }

  /** These attributes are shared across all spans submitted in the batch. */
  private static Attributes getCommonAttributes() {
    return new Attributes().put("exampleName", "SpanExample");
  }
}
3e11bf971f5129b82c2c9d3cf5e187c95a15a5cb
5,998
java
Java
inspector/src/main/java/com/taobao/weex/devtools/toolbox/EventOverviewFragment.java
weexteam/android_devtools_android
c812795cc74f67a777c184d23806257489bc96af
[ "Apache-2.0" ]
134
2016-11-03T12:00:09.000Z
2019-06-05T14:45:41.000Z
inspector/src/main/java/com/taobao/weex/devtools/toolbox/EventOverviewFragment.java
weexteam/android_devtools_android
c812795cc74f67a777c184d23806257489bc96af
[ "Apache-2.0" ]
25
2016-11-10T08:16:56.000Z
2019-05-31T03:32:53.000Z
inspector/src/main/java/com/taobao/weex/devtools/toolbox/EventOverviewFragment.java
weexteam/android_devtools_android
c812795cc74f67a777c184d23806257489bc96af
[ "Apache-2.0" ]
35
2016-11-17T03:34:06.000Z
2019-05-27T06:13:59.000Z
35.702381
114
0.682561
7,488
package com.taobao.weex.devtools.toolbox;

import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;

import com.taobao.weex.WXSDKManager;
import com.taobao.weex.adapter.ITracingAdapter;
import com.taobao.weex.devtools.adapter.WXTracingAdapter;
import com.taobao.weex.tracing.WXTracing;
import com.taobao.weex.ui.WXRenderManager;
import com.taobao.weex.ui.component.WXComponent;
import com.taobao.weex.ui.view.listview.WXRecyclerView;
import com.taobao.weex.inspector.R;

/**
 * Fragment that renders a per-instance overview of Weex trace events as a
 * vertical list: one row per direct sub-event of the instance's root trace
 * event, with a horizontal bar visualizing each event's offset and duration
 * relative to the root. Tapping a row that has sub-events drills down into
 * {@link EventDetailFragment}.
 */
public class EventOverviewFragment extends Fragment {

  // List showing one row per sub-event of the root trace event.
  private RecyclerView list;

  public EventOverviewFragment() {
    // Required empty public constructor
  }

  /**
   * Factory method: builds the fragment with the Weex instance id stored in
   * its arguments bundle under the key "instanceId".
   */
  public static EventOverviewFragment getInstance(int instanceId) {
    Bundle bundle = new Bundle();
    bundle.putInt("instanceId", instanceId);
    EventOverviewFragment fragment = new EventOverviewFragment();
    fragment.setArguments(bundle);
    return fragment;
  }

  /**
   * Inflates the layout and, when tracing data is available for the requested
   * instance id, installs a {@link PerfListAdapter} over that instance's root
   * trace event. With no tracing adapter or no instance id, the list is left
   * without an adapter (empty).
   */
  @Override
  public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
    View root = inflater.inflate(R.layout.fragment_event_overview, container, false);
    int instanceId = getArguments().getInt("instanceId", -1);
    list = (RecyclerView) root.findViewById(R.id.perf_list);
    list.setLayoutManager(new LinearLayoutManager(getContext()));
    ITracingAdapter adapter = WXSDKManager.getInstance().getTracingAdapter();
    if (adapter != null && adapter instanceof WXTracingAdapter) {
      if (instanceId != -1) {
        list.setAdapter(new PerfListAdapter(((WXTracingAdapter) adapter).getTraceEventByInstanceId(instanceId)));
      }
    }
    return root;
  }

  /**
   * Adapter binding the root event's sub-events to rows.
   *
   * NOTE(review): non-static inner class — each adapter holds an implicit
   * reference to the fragment; confirm lifetime does not outlive the fragment.
   */
  private class PerfListAdapter extends WXRecyclerView.Adapter<ItemHolder> {
    // Root trace event whose subEvents are displayed; its ts/duration are the
    // reference frame for the timeline bars.
    private WXTracing.TraceEvent rootEvent;

    public PerfListAdapter(WXTracing.TraceEvent rootEvent) {
      this.rootEvent = rootEvent;
    }

    @Override
    public ItemHolder onCreateViewHolder(ViewGroup parent, int viewType) {
      return new ItemHolder(LayoutInflater.from(parent.getContext())
          .inflate(R.layout.item_trace_event_item, parent, false));
    }

    @Override
    public void onBindViewHolder(final ItemHolder holder, int position) {
      final WXTracing.TraceEvent event = rootEvent.subEvents.valueAt(position);
      // Drill into the detail view, but only for events that have sub-events.
      holder.itemView.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
          if (event.subEvents == null) {
            return;
          }
          getActivity()
              .getSupportFragmentManager()
              .beginTransaction()
              .replace(R.id.fragment_container, EventDetailFragment.getFragment(rootEvent.traceId, event.traceId))
              .addToBackStack(EventDetailFragment.class.getSimpleName())
              .commitAllowingStateLoss();
        }
      });
      // The "info" affordance is only shown for rows that can be drilled into.
      if (event.subEvents == null) {
        holder.info.setVisibility(View.INVISIBLE);
      } else {
        holder.info.setVisibility(View.VISIBLE);
      }
      // Keep rootEvent.ts at the minimum observed timestamp so bar offsets
      // below are never negative.
      // NOTE(review): this mutates shared adapter state during binding —
      // earlier-bound rows keep offsets computed from the old minimum.
      if (event.ts < rootEvent.ts) {
        rootEvent.ts = event.ts;
      }
      holder.actionName.setText(event.fname);
      holder.actionDuration.setText(event.duration + " ms");
      final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) holder.duration.getLayoutParams();
      // Deferred via post() so getItemWidth() reflects the measured width.
      holder.itemView.post(new Runnable() {
        @Override
        public void run() {
          long gap = event.ts - rootEvent.ts;
          // NOTE(review): if ts/duration are integral types, these divisions
          // truncate before the multiply — confirm whether fractional
          // proportions are intended here.
          int margin = (int) (gap / rootEvent.duration * holder.getItemWidth());
          int width = (int) (event.duration / rootEvent.duration * holder.getItemWidth());
          lp.width = width;
          lp.leftMargin = margin;
          holder.duration.setLayoutParams(lp);
        }
      });
      // When the event references a rendered component, show its type, the
      // backing Android view class, and its ref; otherwise show placeholders.
      if (event.ref != null) {
        WXRenderManager renderManager = WXSDKManager.getInstance().getWXRenderManager();
        WXComponent component = renderManager.getWXComponent(event.iid, event.ref);
        if (component != null) {
          String type = component.getComponentType();
          holder.compType.setText("<" + type + "/>");
          if (component.getRealView() != null) {
            holder.viewType.setText(component.getRealView().getClass().getSimpleName());
          }
          if (component.isLazy()) {
            holder.compType.append(" @lazy");
          }
          holder.compRef.setText("Ref: " + component.getRef());
        }
      } else {
        holder.compType.setText("-");
        holder.viewType.setText("-");
        holder.compRef.setText("-");
      }
    }

    @Override
    public int getItemCount() {
      return rootEvent.subEvents.size();
    }
  }

  /** View holder caching the row's sub-views looked up from the item layout. */
  public class ItemHolder extends RecyclerView.ViewHolder {
    public TextView actionName;
    public TextView compRef;
    public View duration;
    public LinearLayout infoContent;
    public TextView actionDuration;
    public TextView viewType;
    public TextView compType;
    public ImageView info;

    public ItemHolder(View itemView) {
      super(itemView);
      actionName = (TextView) itemView.findViewById(R.id.action_name);
      compRef = (TextView) itemView.findViewById(R.id.comp_ref);
      duration = (View) itemView.findViewById(R.id.duration);
      infoContent = (LinearLayout) itemView.findViewById(R.id.info_content);
      actionDuration = (TextView) itemView.findViewById(R.id.action_duration);
      viewType = (TextView) itemView.findViewById(R.id.view_type);
      compType = (TextView) itemView.findViewById(R.id.comp_type);
      info = (ImageView) itemView.findViewById(R.id.info);
    }

    /** Usable bar width: measured container width minus a fixed 200px inset. */
    public int getItemWidth() {
      return infoContent.getMeasuredWidth() - 200;
    }
  }
}
3e11bfb9e7d906d78baaed456f8ce8d59c7abe56
2,387
java
Java
googleplay-desktop/src/main/java/games/spooky/gdx/gameservices/googleplay/desktop/GooglePlaySnapshotWrapper.java
spookygames/gdx-gameservices
cec40a9886ce70cada6e365e6eb54c13ff8db1a5
[ "MIT" ]
3
2017-08-01T07:31:22.000Z
2018-05-02T14:55:01.000Z
googleplay-desktop/src/main/java/games/spooky/gdx/gameservices/googleplay/desktop/GooglePlaySnapshotWrapper.java
spookygames/gdx-gameservices
cec40a9886ce70cada6e365e6eb54c13ff8db1a5
[ "MIT" ]
2
2017-10-05T11:51:39.000Z
2021-04-30T10:39:46.000Z
googleplay-desktop/src/main/java/games/spooky/gdx/gameservices/googleplay/desktop/GooglePlaySnapshotWrapper.java
spookygames/gdx-gameservices
cec40a9886ce70cada6e365e6eb54c13ff8db1a5
[ "MIT" ]
2
2018-05-24T19:20:47.000Z
2018-07-20T11:43:38.000Z
31
84
0.70088
7,489
/* * The MIT License (MIT) * * Copyright (c) 2017-2021 Spooky Games * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package games.spooky.gdx.gameservices.googleplay.desktop; import com.google.api.client.util.DateTime; import com.google.api.services.drive.model.File; import games.spooky.gdx.gameservices.savedgame.SavedGame; public class GooglePlaySnapshotWrapper implements SavedGame { private final File wrapped; public GooglePlaySnapshotWrapper(File wrapped) { super(); this.wrapped = wrapped; } @Override public String getId() { return wrapped.getId(); } @Override public String getTitle() { return wrapped.getName(); } @Override public String getDescription() { return wrapped.getDescription(); } @Override public long getTimestamp() { DateTime time = wrapped.getModifiedTime(); return time == null ? 
-1 : time.getValue(); } @Override public long getPlayedTime() { // Last modified time - created time // Of course this is incorrect DateTime createdTime = wrapped.getCreatedTime(); return getTimestamp() - (createdTime == null ? -1 : createdTime.getValue()); } @Override public String getDeviceName() { return ""; } File getWrapped() { return wrapped; } }
3e11bff354f1acd483fbb5c7f78455476a07151d
168
java
Java
src/main/java/org/roy/kata16/external/RoyaltyService.java
patroytall/kata16
386d296cc5fab8a5608027955bcd6f0ad3181473
[ "Apache-2.0" ]
1
2019-03-28T11:29:11.000Z
2019-03-28T11:29:11.000Z
src/main/java/org/roy/kata16/external/RoyaltyService.java
patroytall/kata16
386d296cc5fab8a5608027955bcd6f0ad3181473
[ "Apache-2.0" ]
null
null
null
src/main/java/org/roy/kata16/external/RoyaltyService.java
patroytall/kata16
386d296cc5fab8a5608027955bcd6f0ad3181473
[ "Apache-2.0" ]
null
null
null
21
54
0.809524
7,490
package org.roy.kata16.external;

import org.roy.kata16.entity.PackingSlip;

/**
 * Boundary interface to the (external) royalty department.
 *
 * <p>Lives in the {@code external} package, so implementations are expected to
 * integrate with a system outside this codebase.
 */
public interface RoyaltyService {

    /**
     * Registers a packing slip with the royalty department.
     *
     * <p>NOTE(review): what "registering" entails (delivery channel, retries,
     * error behavior) is implementation-defined and not visible from here.
     *
     * @param packingSlip the packing slip to register
     */
    void registerPackingSlip(PackingSlip packingSlip);
}
3e11c0491ed44f2ed26b46d36feb2a8cfdaf966f
411
java
Java
src/main/java/jp/co/tdc/jamcha/model/PackageName.java
tdc-yamada-ya/jamcha
1be35f85a6d68a194ab74fe5da1ffcfdfe59b672
[ "MIT" ]
2
2021-01-14T07:26:47.000Z
2021-06-22T13:32:08.000Z
src/main/java/jp/co/tdc/jamcha/model/PackageName.java
tdc-yamada-ya/jamcha
1be35f85a6d68a194ab74fe5da1ffcfdfe59b672
[ "MIT" ]
null
null
null
src/main/java/jp/co/tdc/jamcha/model/PackageName.java
tdc-yamada-ya/jamcha
1be35f85a6d68a194ab74fe5da1ffcfdfe59b672
[ "MIT" ]
null
null
null
19.571429
40
0.766423
7,491
package jp.co.tdc.jamcha.model;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;

/**
 * Immutable value object wrapping a Java package name string.
 *
 * <p>Lombok generates the constructor ({@code @RequiredArgsConstructor}),
 * value-based {@code equals}/{@code hashCode}, and a fluent accessor
 * {@code value()} ({@code @Getter} + {@code @Accessors(fluent = true)}).
 */
@RequiredArgsConstructor
@EqualsAndHashCode
@Getter
@Accessors(fluent = true)
public class PackageName {

    // Raw package name text; @NonNull makes the generated constructor throw
    // NullPointerException when passed null.
    @NonNull
    private final String value;

    /** Returns the raw package name string. */
    @Override
    public String toString() {
        return value;
    }
}
3e11c0e10aa9b71ff0205df2c56c7e5aae8d11bd
1,552
java
Java
android/src/main/java/br/com/queizysartori/flutterxmp/flutter_xmp/FlutterXmpPlugin.java
queizy-sartori/flutter-xmp
2710f44615ecaeb1fb2b75fecb452ef52ec013c9
[ "MIT" ]
null
null
null
android/src/main/java/br/com/queizysartori/flutterxmp/flutter_xmp/FlutterXmpPlugin.java
queizy-sartori/flutter-xmp
2710f44615ecaeb1fb2b75fecb452ef52ec013c9
[ "MIT" ]
1
2022-02-16T04:54:24.000Z
2022-02-19T15:59:27.000Z
android/src/main/java/br/com/queizysartori/flutterxmp/flutter_xmp/FlutterXmpPlugin.java
queizy-sartori/flutter-xmp
2710f44615ecaeb1fb2b75fecb452ef52ec013c9
[ "MIT" ]
null
null
null
30.431373
90
0.74549
7,492
package br.com.queizysartori.flutterxmp.flutter_xmp;

import android.content.Context;

import androidx.annotation.NonNull;

import java.util.Map;

import io.flutter.embedding.engine.plugins.FlutterPlugin;
import io.flutter.plugin.common.MethodCall;
import io.flutter.plugin.common.MethodChannel;
import io.flutter.plugin.common.MethodChannel.MethodCallHandler;
import io.flutter.plugin.common.MethodChannel.Result;
import io.flutter.plugin.common.PluginRegistry.Registrar;

/**
 * Flutter plugin bridging the "flutter_xmp" method channel to native XMP
 * metadata extraction.
 */
public class FlutterXmpPlugin implements FlutterPlugin, MethodCallHandler {
  // Channel through which Dart and native code exchange calls.
  private MethodChannel channel;
  private Context mContext;

  @Override
  public void onAttachedToEngine(@NonNull FlutterPluginBinding flutterPluginBinding) {
    mContext = flutterPluginBinding.getApplicationContext();
    channel = new MethodChannel(flutterPluginBinding.getBinaryMessenger(), "flutter_xmp");
    channel.setMethodCallHandler(this);
  }

  @Override
  public void onMethodCall(@NonNull MethodCall call, @NonNull final Result result) {
    switch (call.method) {
      case "extractXmpFromRemote":
        // Fetch the image at the given URL and reply with its XMP metadata map.
        RemoteImageXmpFetcher.fetch(
            call.<String>argument("url"),
            mContext,
            new MetadataCallbackImp() {
              @Override
              public void onSuccess(XmpResult xmpResult) {
                result.success(xmpResult.toMap());
              }
            });
        break;
      default:
        result.notImplemented();
        break;
    }
  }

  @Override
  public void onDetachedFromEngine(@NonNull FlutterPluginBinding binding) {
    channel.setMethodCallHandler(null);
  }
}
3e11c185ba7e2cb93b50dd4c41ebbef882f5a8ce
243
java
Java
FDBMS/src/JOptionPanel.java
idrice24/FamilyDBMS
a2b698e33fb58c6ef7ea36d9c098cb9c2a03b968
[ "Apache-2.0" ]
null
null
null
FDBMS/src/JOptionPanel.java
idrice24/FamilyDBMS
a2b698e33fb58c6ef7ea36d9c098cb9c2a03b968
[ "Apache-2.0" ]
1
2020-09-08T11:20:42.000Z
2020-09-08T11:20:42.000Z
FDBMS/src/JOptionPanel.java
idrice24/FamilyDBMS
a2b698e33fb58c6ef7ea36d9c098cb9c2a03b968
[ "Apache-2.0" ]
null
null
null
17.357143
79
0.683128
7,493
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */

/**
 * Empty placeholder class generated from an IDE template.
 *
 * <p>NOTE(review): contains no members and no behavior — presumably a stub
 * awaiting implementation; confirm whether it is still needed.
 *
 * @author Lenovo
 */
class JOptionPanel {
    // Intentionally empty.
}
3e11c256ab65d599f472680b5f68d819e7bfee94
1,271
java
Java
dp/RodCutting.java
imshyam/algos
51e44d8e79a1abfb53203d0f12a5d46d6f90fe97
[ "MIT" ]
null
null
null
dp/RodCutting.java
imshyam/algos
51e44d8e79a1abfb53203d0f12a5d46d6f90fe97
[ "MIT" ]
null
null
null
dp/RodCutting.java
imshyam/algos
51e44d8e79a1abfb53203d0f12a5d46d6f90fe97
[ "MIT" ]
null
null
null
22.298246
89
0.579072
7,494
// NOTE: originally declared as "package algos.dp;" — the package declaration is
// omitted here so the class compiles and runs standalone.

import java.util.Map;
import java.util.HashMap;
import java.util.ArrayList;

/**
 *
 * @author imshyam
 *
 */
/** Classic rod-cutting dynamic program.
 * After solve(n, p):
 * 1. price
 *    maximum revenue obtainable by cutting up a rod of length n and selling
 *    the pieces, where p[k - 1] is the price of a piece of length k
 * 2. result
 *    ArrayList of the piece lengths of one optimal cutting (sums to n)
 */
public class RodCutting {

    // Maximum revenue obtainable by cutting up the rod and selling the pieces
    public long price;

    // Length of the pieces
    public ArrayList<Integer> result = new ArrayList<Integer>();

    /**
     * Computes the optimal revenue and one optimal piece decomposition.
     *
     * <p>Fixes over the original:
     * <ul>
     *   <li>reconstruction emitted the REMAINDER ({@code cutHere[n]}) as a
     *       piece instead of the first piece ({@code n - cutHere[n]}), so the
     *       returned pieces could contradict {@code price} (e.g. prices
     *       {0,10,0,0,0}, n=5 gave pieces [4,1] worth 0 while price=20);</li>
     *   <li>the do-while looped forever for n == 0;</li>
     *   <li>{@code result} now resets between calls instead of accumulating.</li>
     * </ul>
     *
     * @param n rod length (n &gt;= 0; p must have at least n entries)
     * @param p p[k - 1] = selling price of a piece of length k
     */
    public void solve(int n, int[] p) {
        // Reset output state so repeated solve() calls do not accumulate.
        this.result.clear();

        // tempSol[i] = best revenue obtainable from a rod of length i
        int tempSol[] = new int[n + 1];

        // cutHere[i] = remaining length after removing the first (optimal)
        // piece from a rod of length i; 0 means "sell length i whole"
        int cutHere[] = new int[n + 1];

        tempSol[0] = 0;

        for (int i = 1; i < n + 1; i++) {
            // Best revenue for length i over all first-piece lengths j
            int t = -1;
            // Remainder left after the best first piece
            int t1 = i;
            for (int j = 1; j < i + 1; j++) {
                if (t < (p[j - 1] + tempSol[i - j])) {
                    t1 = i - j;
                }
                t = max(t, p[j - 1] + tempSol[i - j]);
            }
            tempSol[i] = t;
            cutHere[i] = t1;
        }

        this.price = tempSol[n];

        // Reconstruct: a rod of length m yields a first piece of length
        // m - cutHere[m]; continue on the remainder cutHere[m].
        while (n > 0) {
            int piece = n - cutHere[n];
            this.result.add(piece);
            n = cutHere[n];
        }
    }

    // Small helper kept from the original implementation.
    int max(int a, int b) {
        return a > b ? a : b;
    }
}
3e11c3555caef8e31e7e201d396a0710442396da
2,001
java
Java
src/main/java/org/cherchgk/services/TournamentService.java
selenide-examples/chgk
fb6c3f97e03030e6e347eed3c2d50f0490131fca
[ "Apache-2.0" ]
7
2015-03-15T07:07:31.000Z
2022-01-07T09:22:23.000Z
src/main/java/org/cherchgk/services/TournamentService.java
selenide-examples/chgk
fb6c3f97e03030e6e347eed3c2d50f0490131fca
[ "Apache-2.0" ]
6
2016-06-21T09:35:55.000Z
2022-02-09T22:56:19.000Z
src/main/java/org/cherchgk/services/TournamentService.java
selenide-examples/chgk
fb6c3f97e03030e6e347eed3c2d50f0490131fca
[ "Apache-2.0" ]
1
2016-08-08T21:12:01.000Z
2016-08-08T21:12:01.000Z
35.839286
91
0.727454
7,495
/*
 * Copyright 2012-2019 Andrey Grigorov, Anton Grigorov
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.cherchgk.services;

import org.cherchgk.domain.RightAnswer;
import org.cherchgk.domain.TeamCategory;
import org.cherchgk.domain.Tournament;

import java.util.List;

/**
 * Service interface for working with the tournament data store.
 *
 * @author Andrey Grigorov (ychag@example.com)
 */
public interface TournamentService extends DataService<Tournament> {

    /**
     * Returns the sequence number to assign to a team being created.
     *
     * @param tournamentId id of the tournament
     * @return the next team sequence number
     */
    int getNextTeamNumber(long tournamentId);

    /**
     * Returns all right answers given during the tournament by teams of the
     * specified category. When no category is given, right answers from all
     * teams of the tournament are returned.
     *
     * @param tournament   the tournament whose right answers are requested
     * @param teamCategory team category filter; may be null for "all teams"
     * @return list of all {@link org.cherchgk.domain.RightAnswer} given during
     *         the tournament for the specified team category, or for all teams
     *         when no category is specified
     */
    List<RightAnswer> getAllRightAnswers(Tournament tournament, TeamCategory teamCategory);
}
3e11c3edfaa2f592c1a096d5be71c5c2c3df08a9
152
java
Java
src/main/java/com/dtcc/homeworktracker/util/SaveTasks.java
ashp1731/homeworktracker
48e9862db7bf8b2072ddae0f53d707db742736b1
[ "MIT" ]
null
null
null
src/main/java/com/dtcc/homeworktracker/util/SaveTasks.java
ashp1731/homeworktracker
48e9862db7bf8b2072ddae0f53d707db742736b1
[ "MIT" ]
null
null
null
src/main/java/com/dtcc/homeworktracker/util/SaveTasks.java
ashp1731/homeworktracker
48e9862db7bf8b2072ddae0f53d707db742736b1
[ "MIT" ]
null
null
null
13.818182
41
0.723684
7,496
package com.dtcc.homeworktracker.util; public class SaveTasks { public static void main(String[] args) { // TODO Auto-generated method stub } }
3e11c42b30c5cdaaa0067f4aed5f1e37fdc1adb1
2,617
java
Java
app/src/main/java/com/testerhome/nativeandroid/models/TesterUser.java
testerhome/A-Native-TesterHome
9e28727df584118105bc6b0e407dcfef1207fa97
[ "MIT" ]
33
2015-10-24T02:20:31.000Z
2019-05-03T13:39:13.000Z
app/src/main/java/com/testerhome/nativeandroid/models/TesterUser.java
testerhome/A-Native-TesterHome
9e28727df584118105bc6b0e407dcfef1207fa97
[ "MIT" ]
11
2015-11-05T00:21:25.000Z
2017-01-04T14:17:39.000Z
app/src/main/java/com/testerhome/nativeandroid/models/TesterUser.java
testerhome/A-Native-TesterHome
9e28727df584118105bc6b0e407dcfef1207fa97
[ "MIT" ]
31
2015-10-20T15:02:25.000Z
2020-10-14T01:31:27.000Z
19.385185
56
0.612533
7,497
package com.testerhome.nativeandroid.models; /** * Created by vclub on 15/10/13. */ public class TesterUser { private String id; private String login; private String name; private String avatar_url; private String location; private String website; private String github; private String email; private String company; private String access_token; private String tagline; private String refresh_token; private long expireDate; private long create_at; public long getCreate_at() { return create_at; } public void setCreate_at(long create_at) { this.create_at = create_at; } public String getRefresh_token() { return refresh_token; } public void setRefresh_token(String refresh_token) { this.refresh_token = refresh_token; } public long getExpireDate() { return expireDate; } public void setExpireDate(long expireDate) { this.expireDate = expireDate; } public String getTagline() { return tagline; } public void setTagline(String tagline) { this.tagline = tagline; } public String getCompany() { return company; } public void setCompany(String company) { this.company = company; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getLogin() { return login; } public void setLogin(String login) { this.login = login; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getAvatar_url() { return avatar_url; } public void setAvatar_url(String avatar_url) { this.avatar_url = avatar_url; } public String getLocation() { return location; } public void setLocation(String location) { this.location = location; } public String getWebsite() { return website; } public void setWebsite(String website) { this.website = website; } public String getGithub() { return github; } public void setGithub(String github) { this.github = github; } public String getEmail() { return email; } public void setEmail(String email) { this.email = email; } public String getAccess_token() { return access_token; } public 
void setAccess_token(String access_token) { this.access_token = access_token; } }
3e11c4b5021f748042f4211ad1810553f5e77aaa
1,074
java
Java
taotao-portal/src/main/java/com/taotao/portal/pojo/AdNode.java
luoyefeiwu/learn_java
1a98c3972188c19abbc7ae23e8f17536ff9a1fe6
[ "Apache-2.0" ]
null
null
null
taotao-portal/src/main/java/com/taotao/portal/pojo/AdNode.java
luoyefeiwu/learn_java
1a98c3972188c19abbc7ae23e8f17536ff9a1fe6
[ "Apache-2.0" ]
7
2021-01-20T22:51:44.000Z
2021-12-09T20:15:15.000Z
taotao-portal/src/main/java/com/taotao/portal/pojo/AdNode.java
luoyefeiwu/learn_java
1a98c3972188c19abbc7ae23e8f17536ff9a1fe6
[ "Apache-2.0" ]
null
null
null
17.047619
38
0.678771
7,498
package com.taotao.portal.pojo; public class AdNode { private int height; private int width; private String src; private int heightB; private int widthB; private String alt; private String srcB; private String href; public String getAlt() { return alt; } public void setAlt(String alt) { this.alt = alt; } public int getHeight() { return height; } public void setHeight(int height) { this.height = height; } public int getWidth() { return width; } public void setWidth(int width) { this.width = width; } public String getSrc() { return src; } public void setSrc(String src) { this.src = src; } public int getHeightB() { return heightB; } public void setHeightB(int heightB) { this.heightB = heightB; } public int getWidthB() { return widthB; } public void setWidthB(int widthB) { this.widthB = widthB; } public String getSrcB() { return srcB; } public void setSrcB(String srcB) { this.srcB = srcB; } public String getHref() { return href; } public void setHref(String href) { this.href = href; } }
3e11c57434afc993aa8d709ee9511ca3027dca88
434
java
Java
src/java/com/fdorigo/rmfly/wicket/FooterPanel.java
fdorigo/rmfly
466eabbd23def129552fd0a6367b09fb510fcca3
[ "Apache-2.0" ]
null
null
null
src/java/com/fdorigo/rmfly/wicket/FooterPanel.java
fdorigo/rmfly
466eabbd23def129552fd0a6367b09fb510fcca3
[ "Apache-2.0" ]
1
2015-05-26T21:14:53.000Z
2015-05-26T21:14:53.000Z
src/java/com/fdorigo/rmfly/wicket/FooterPanel.java
fdorigo/rmfly
466eabbd23def129552fd0a6367b09fb510fcca3
[ "Apache-2.0" ]
null
null
null
16.692308
49
0.642857
7,499
/* * FooterPanel.java * * Created on May 5, 2015, 1:37 PM */ package com.fdorigo.rmfly.wicket; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.panel.Panel; /** * * @author fdorigo * @version */ public final class FooterPanel extends Panel { public FooterPanel(String id, String text) { super(id); add(new Label("footerpanel_text", text)); } }
3e11c58d9447b082f96ba55f974967f6dd0c38a4
2,776
java
Java
backend/src/main/java/pt/ulisboa/tecnico/socialsoftware/tutor/answer/domain/QuestionAnswerItem.java
ritosilva/quizzes-tutor
5995988ebbdee2699ce849e0b321d8cce9615167
[ "MIT" ]
31
2020-02-07T14:09:39.000Z
2022-03-31T21:49:46.000Z
backend/src/main/java/pt/ulisboa/tecnico/socialsoftware/tutor/answer/domain/QuestionAnswerItem.java
ritosilva/quizzes-tutor
5995988ebbdee2699ce849e0b321d8cce9615167
[ "MIT" ]
280
2020-01-28T12:31:09.000Z
2022-03-31T21:14:16.000Z
backend/src/main/java/pt/ulisboa/tecnico/socialsoftware/tutor/answer/domain/QuestionAnswerItem.java
ritosilva/quizzes-tutor
5995988ebbdee2699ce849e0b321d8cce9615167
[ "MIT" ]
66
2020-02-17T17:38:41.000Z
2022-03-16T13:57:05.000Z
27.485149
90
0.692723
7,500
package pt.ulisboa.tecnico.socialsoftware.tutor.answer.domain; import pt.ulisboa.tecnico.socialsoftware.tutor.answer.dto.StatementAnswerDto; import pt.ulisboa.tecnico.socialsoftware.tutor.question.domain.QuestionDetails; import pt.ulisboa.tecnico.socialsoftware.tutor.utils.DateHandler; import javax.persistence.*; import java.time.LocalDateTime; @Entity @Table(name = "question_answer_items", indexes = { @Index(name = "question_answer_items_indx_0", columnList = "quiz_id"), }) @Inheritance(strategy = InheritanceType.SINGLE_TABLE) @DiscriminatorColumn(name = "question_answer_type", columnDefinition = "varchar(32) not null default 'multiple_choice'", discriminatorType = DiscriminatorType.STRING) public abstract class QuestionAnswerItem { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Integer id; @Column(name = "quiz_id") private Integer quizId; private String username; private Integer quizQuestionId; private LocalDateTime answerDate; private Integer timeTaken; private Integer timeToSubmission; protected QuestionAnswerItem() { } protected QuestionAnswerItem(String username, int quizId, StatementAnswerDto answer) { this.username = username; this.quizId = quizId; this.quizQuestionId = answer.getQuizQuestionId(); this.answerDate = DateHandler.now(); this.timeTaken = answer.getTimeTaken(); this.timeToSubmission = answer.getTimeToSubmission(); } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public Integer getQuizId() { return quizId; } public void setQuizId(Integer quizId) { this.quizId = quizId; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public Integer getQuizQuestionId() { return quizQuestionId; } public void setQuizQuestionId(Integer quizQuestionId) { this.quizQuestionId = quizQuestionId; } public LocalDateTime getAnswerDate() { return answerDate; } public void setAnswerDate(LocalDateTime answerDate) { this.answerDate = answerDate; } 
public Integer getTimeTaken() { return timeTaken; } public void setTimeTaken(Integer timeTaken) { this.timeTaken = timeTaken; } public Integer getTimeToSubmission() { return timeToSubmission; } public void setTimeToSubmission(Integer timeToSubmission) { this.timeToSubmission = timeToSubmission; } public abstract String getAnswerRepresentation(QuestionDetails questionDetails); }
3e11c635bc9dcf208ed9ed69241ab04c88f20d3d
7,159
java
Java
android/app/src/main/java/me/ranmocy/rcaltrain/ScheduleLoader.java
ranmocy/rCaltrain
fca4e03bbbe6cfd69ac95669996ffa7bd3e420c7
[ "MIT" ]
8
2015-05-15T20:40:07.000Z
2019-12-20T16:02:49.000Z
android/app/src/main/java/me/ranmocy/rcaltrain/ScheduleLoader.java
ranmocy/rCaltrain
fca4e03bbbe6cfd69ac95669996ffa7bd3e420c7
[ "MIT" ]
12
2016-04-02T02:30:16.000Z
2022-02-26T01:18:52.000Z
android/app/src/main/java/me/ranmocy/rcaltrain/ScheduleLoader.java
ranmocy/rCaltrain
fca4e03bbbe6cfd69ac95669996ffa7bd3e420c7
[ "MIT" ]
8
2015-05-15T20:40:13.000Z
2019-08-06T17:59:30.000Z
33.92891
100
0.665456
7,501
package me.ranmocy.rcaltrain; import android.content.Context; import android.util.Log; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.net.URLConnection; import java.util.ArrayList; import java.util.Calendar; import java.util.Iterator; import java.util.List; import androidx.annotation.RawRes; import me.ranmocy.rcaltrain.database.ScheduleDatabase; import me.ranmocy.rcaltrain.database.Service; import me.ranmocy.rcaltrain.database.ServiceDate; import me.ranmocy.rcaltrain.database.Station; import me.ranmocy.rcaltrain.database.Stop; import me.ranmocy.rcaltrain.database.Trip; import me.ranmocy.rcaltrain.models.DayTime; public final class ScheduleLoader { private static final String TAG = "DataLoader"; public static void loadFromRemote(Context context) { try { List<Service> services = getCalendar(getRemoteFile("calendar.json")); List<ServiceDate> serviceDates = getCalendarDates(getRemoteFile("calendar_dates.json")); List<Station> stations = getStations(getRemoteFile("stops.json")); Routes routes = getRoutes(getRemoteFile("routes.json")); ScheduleDatabase.get(context) .updateData(stations, services, serviceDates, routes.trips, routes.stops); Log.i(TAG, "Data loaded."); } catch (IOException | JSONException e) { Log.e(TAG, "Failed load from remote", e); throw new RuntimeException(e); } } public static void load(Context context, ScheduleDatabase db) { try { List<Service> services = getCalendar(getFile(context, R.raw.calendar)); List<ServiceDate> serviceDates = getCalendarDates(getFile(context, R.raw.calendar_dates)); List<Station> stations = getStations(getFile(context, R.raw.stops)); Routes routes = getRoutes(getFile(context, R.raw.routes)); db.updateData(stations, services, serviceDates, routes.trips, routes.stops); Log.i(TAG, "Data loaded."); } catch (IOException | JSONException e) { Log.e(TAG, "Failed loading", e); // TODO: show dialog throw new 
RuntimeException(e); } } /** * calendar: service_id => {weekday: bool, saturday: bool, sunday: bool, start_date: date, * end_date: date} CT-16APR-Caltrain-Weekday-01 => {weekday: false, saturday: true, sunday: false, * start_date: 20160404, end_date: 20190331} */ private static List<Service> getCalendar(String calendar) throws JSONException { List<Service> services = new ArrayList<>(); JSONObject json = new JSONObject(calendar); for (Iterator<String> it = json.keys(); it.hasNext(); ) { String serviceId = it.next(); JSONObject s = json.getJSONObject(serviceId); boolean weekday = s.getBoolean("weekday"); boolean saturday = s.getBoolean("saturday"); boolean sunday = s.getBoolean("sunday"); Calendar startDate = getDate(s.getInt("start_date")); Calendar endDate = getDate(s.getInt("end_date")); services.add(new Service(serviceId, weekday, saturday, sunday, startDate, endDate)); } return services; } /** * calendar_dates: service_id => [[date, exception_type]] CT-16APR-Caltrain-Weekday-01 => * [[20160530,2]] */ private static List<ServiceDate> getCalendarDates(String jsonStr) throws JSONException { List<ServiceDate> serviceDates = new ArrayList<>(); JSONObject json = new JSONObject(jsonStr); for (Iterator<String> it = json.keys(); it.hasNext(); ) { String serviceId = it.next(); JSONArray dateJSON = json.getJSONArray(serviceId); for (int i = 0; i < dateJSON.length(); i++) { JSONArray pairJSON = dateJSON.getJSONArray(i); Calendar date = getDate(pairJSON.getInt(0)); int type = pairJSON.getInt(1); serviceDates.add(new ServiceDate(serviceId, date, type)); } } return serviceDates; } /** stop_name => [stop_id1, stop_id2] "San Francisco" => [70021, 70022] */ private static List<Station> getStations(String jsonStr) throws JSONException { List<Station> stations = new ArrayList<>(); JSONObject json = new JSONObject(jsonStr); for (Iterator<String> it = json.keys(); it.hasNext(); ) { String name = it.next(); JSONArray ids = json.getJSONArray(name); for (int i = 0; i < ids.length(); 
i++) { int id = ids.getInt(i); stations.add(new Station(id, name)); } } return stations; } private static final class Routes { final List<Trip> trips; final List<Stop> stops; private Routes(List<Trip> trips, List<Stop> stops) { this.trips = trips; this.stops = stops; } } /** * routes: { route_id => { service_id => { trip_id => [[station_id, * arrival_time/departure_time(seconds) ]] } } } { "Bullet" => { "CT-14OCT-XXX" => { * "650770-CT-14OCT-XXX" => [[70012, 29700], ...] } } } */ private static Routes getRoutes(String jsonStr) throws JSONException { List<Trip> tripList = new ArrayList<>(); List<Stop> stopList = new ArrayList<>(); JSONObject routes = new JSONObject(jsonStr); for (Iterator<String> routeIds = routes.keys(); routeIds.hasNext(); ) { String routeId = routeIds.next(); JSONObject services = routes.getJSONObject(routeId); for (Iterator<String> serviceIds = services.keys(); serviceIds.hasNext(); ) { String serviceId = serviceIds.next(); JSONObject trips = services.getJSONObject(serviceId); for (Iterator<String> tripIds = trips.keys(); tripIds.hasNext(); ) { String tripId = tripIds.next(); JSONArray stops = trips.getJSONArray(tripId); tripList.add(new Trip(tripId, serviceId)); for (int index = 0; index < stops.length(); index++) { JSONArray stop = stops.getJSONArray(index); int stationId = stop.getInt(0); DayTime time = new DayTime(stop.getLong(1)); stopList.add(new Stop(tripId, index, stationId, time)); } } } } return new Routes(tripList, stopList); } private static Calendar getDate(int dateInt) { int year = dateInt / 10000; int month = dateInt / 100 % 100 - 1; // month is 0-based int day = dateInt % 100; Calendar calendar = Calendar.getInstance(); calendar.clear(); calendar.set(year, month, day); return calendar; } private static String getFile(Context context, @RawRes int resId) throws IOException { InputStream is = context.getResources().openRawResource(resId); int size = is.available(); byte[] buffer = new byte[size]; is.read(buffer); is.close(); return 
new String(buffer, "UTF-8"); } private static String getRemoteFile(String fileName) throws IOException { URL url = new URL("https://rcaltrain.com/data/" + fileName); URLConnection connection = url.openConnection(); connection.connect(); InputStream is = connection.getInputStream(); int size = is.available(); byte[] buffer = new byte[size]; is.read(buffer); is.close(); return new String(buffer, "UTF-8"); } }
3e11c6bb89bb65ec14fdae0130e8675bb9397865
195
java
Java
actors/src/main/java/com/droidkit/actors/tasks/AskCancelledException.java
actorapp/droidkit-actors
fdb72fcfdd1c5e54a970f203a33a71fa54344217
[ "MIT" ]
14
2015-02-13T20:49:11.000Z
2018-10-05T16:29:34.000Z
actors/src/main/java/com/droidkit/actors/tasks/AskCancelledException.java
actorapp/droidkit-actors
fdb72fcfdd1c5e54a970f203a33a71fa54344217
[ "MIT" ]
2
2016-06-25T11:22:59.000Z
2017-11-27T12:38:35.000Z
actors/src/main/java/com/droidkit/actors/tasks/AskCancelledException.java
actorapp/droidkit-actors
fdb72fcfdd1c5e54a970f203a33a71fa54344217
[ "MIT" ]
14
2015-03-06T00:51:43.000Z
2022-03-15T02:56:15.000Z
17.363636
54
0.748691
7,502
package com.droidkit.actors.tasks; /** * Exception about cancelling task * * @author Stepan Ex3NDR Korshakov (hzdkv@example.com) */ public class AskCancelledException extends Exception { }
3e11c735128f2b0b1fb62e47a1609239f32c6900
4,080
java
Java
sermant-agentcore/sermant-agentcore-core/src/main/java/com/huaweicloud/sermant/core/service/dynamicconfig/common/DynamicConfigEvent.java
pengyuyii/java-mesh
60fc9cabc05659a610e05649dc22a16bec2f9b1c
[ "Apache-2.0" ]
27
2021-12-13T19:35:22.000Z
2022-03-31T03:24:34.000Z
sermant-agentcore/sermant-agentcore-core/src/main/java/com/huaweicloud/sermant/core/service/dynamicconfig/common/DynamicConfigEvent.java
pengyuyii/java-mesh
60fc9cabc05659a610e05649dc22a16bec2f9b1c
[ "Apache-2.0" ]
147
2021-12-13T13:25:38.000Z
2022-03-31T11:17:26.000Z
sermant-agentcore/sermant-agentcore-core/src/main/java/com/huaweicloud/sermant/core/service/dynamicconfig/common/DynamicConfigEvent.java
pengyuyii/java-mesh
60fc9cabc05659a610e05649dc22a16bec2f9b1c
[ "Apache-2.0" ]
19
2021-12-16T11:24:12.000Z
2022-03-31T02:44:33.000Z
28.137931
114
0.630882
7,503
/* * Copyright (C) 2021-2021 Huawei Technologies Co., Ltd. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.huaweicloud.sermant.core.service.dynamicconfig.common; import java.util.EventObject; import java.util.Objects; /** * An event raised when the config changed, immutable. * * @author yangyshdan, HapThorin * @version 1.0.0 * @see DynamicConfigEventType * @since 2021-12-27 */ public class DynamicConfigEvent extends EventObject { private static final long serialVersionUID = 8199411666187944757L; private final String key; private final String group; private final String content; private final DynamicConfigEventType eventType; /** * 构造器 * * @param key 配置键 * @param group 组 * @param content 配置内容 * @param eventType 事件类型 */ public DynamicConfigEvent(String key, String group, String content, DynamicConfigEventType eventType) { super(key + "," + group); this.key = key; this.group = group; this.content = content; this.eventType = eventType; } public String getKey() { return key; } public String getGroup() { return group; } public String getContent() { return content; } public DynamicConfigEventType getEventType() { return eventType; } @Override public String toString() { return "DynamicConfigEvent{" + "key='" + key + '\'' + ", group='" + group + '\'' + ", content='" + content + '\'' + ", eventType=" + eventType + "} " + super.toString(); } @Override public boolean equals(Object target) { if (this == target) { return true; } if (!(target instanceof 
DynamicConfigEvent)) { return false; } DynamicConfigEvent that = (DynamicConfigEvent) target; return Objects.equals(getKey(), that.getKey()) && Objects.equals(getGroup(), that.getGroup()) && Objects.equals(getContent(), that.getContent()) && getEventType() == that.getEventType(); } @Override public int hashCode() { return Objects.hash(getKey(), getGroup(), getContent(), getEventType()); } /** * 构建初始化动态配置事件 * * @param key 配置键 * @param group 分组 * @param content 配置信息 * @return 初始化动态配置事件 */ public static DynamicConfigEvent initEvent(String key, String group, String content) { return new DynamicConfigEvent(key, group, content, DynamicConfigEventType.INIT); } /** * 构建创建配置事件 * * @param key 配置键 * @param group 分组 * @param content 配置信息 * @return 创建配置事件 */ public static DynamicConfigEvent createEvent(String key, String group, String content) { return new DynamicConfigEvent(key, group, content, DynamicConfigEventType.CREATE); } /** * 构建修改配置事件 * * @param key 配置键 * @param group 分组 * @param content 配置信息 * @return 修改配置事件 */ public static DynamicConfigEvent modifyEvent(String key, String group, String content) { return new DynamicConfigEvent(key, group, content, DynamicConfigEventType.MODIFY); } /** * 构建删除配置事件 * * @param key 配置键 * @param group 分组 * @param content 配置信息 * @return 删除配置事件 */ public static DynamicConfigEvent deleteEvent(String key, String group, String content) { return new DynamicConfigEvent(key, group, content, DynamicConfigEventType.DELETE); } }
3e11c73c54fc03f3df7adfca81cbacf635920678
22,273
java
Java
src/me/regexp/CharacterClassMap3.java
Walkline80/Autohome
e6309046e13b04f17665de7e3c7a3ad23a3decb2
[ "MIT" ]
1
2019-04-01T09:27:49.000Z
2019-04-01T09:27:49.000Z
src/me/regexp/CharacterClassMap3.java
Walkline80/Autohome
e6309046e13b04f17665de7e3c7a3ad23a3decb2
[ "MIT" ]
null
null
null
src/me/regexp/CharacterClassMap3.java
Walkline80/Autohome
e6309046e13b04f17665de7e3c7a3ad23a3decb2
[ "MIT" ]
null
null
null
71.159744
86
0.379563
7,504
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package me.regexp; //#ifdef RE_UNICODE //# /** //# * //# * @author Nikolay Neizvesny //# */ //# class CharacterClassMap3 { //# static final byte[] CHAR_CLASSES_15 = { //# 27, 27, 27, 27, 0, 27, 27, 27, 27, 0, 0, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 0, 27, 0, 27, 27, 27, 27, 0, 0, 0, 27, 0, 27, 27, 27, 27, 27, 27, 27, //# 0, 0, 27, 27, 27, 27, 27, 27, 27, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, //# 20, 21, 20, 21, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, //# 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 27, 0, 0, //# 0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 0, 24, 24, 24, 24, 24, 20, 21, 24, 24, 24, 24, 0, 0, 0, 0, 0, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 20, 21, 20, 21, 20, 21, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 
27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, 20, //# 21, 20, 21, 20, 21, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 20, 21, 
20, 21, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 20, 21, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, //# 24, 24, 24, 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 27, 27, //# 27, 27, //# }; //# static final byte[] CHAR_CLASSES_16 = { //# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, //# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, //# 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, //# 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 2, 1, 1, //# 1, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 0, 0, 0, 0, 0, //# 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, //# 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, //# 
2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, //# 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, //# 2, 1, 2, 2, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //# 0, 23, 23, 23, 23, 11, 23, 23, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, //# 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, //# 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, //# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, //# 5, 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 0, 5, 5, 5, 5, 5, //# 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 0, 5, 5, 5, 5, 5, 5, //# 5, 0, 5, 5, 5, 5, 5, 5, 5, //# }; //# static final byte[] CHAR_CLASSES_17 = { //# 23, 23, 28, 29, 28, 29, 23, 23, 23, 28, 29, 23, 28, 29, 23, 23, 23, 23, //# 23, 23, 23, 23, 23, 19, 0, 0, 0, 0, 28, 29, //# }; //# static final byte[] CHAR_CLASSES_18 = { //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# }; //# static final byte[] CHAR_CLASSES_19 = { //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 12, 23, 23, //# 23, 27, 4, 5, 10, 20, 21, 20, 21, 20, 21, 20, 21, 20, 21, 27, 27, 20, 21, //# 20, 21, 20, 21, 20, 21, 19, 20, 21, 21, 27, 10, 10, 10, 10, 10, 10, 10, //# 10, 10, 6, 6, 6, 6, 6, 6, 19, 4, 4, 4, 4, 4, 27, 27, 10, 10, 10, 4, 5, 23, //# 27, 27, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 6, 6, 26, 26, 4, 4, 5, //# 19, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 23, 4, 4, 4, 5, 0, 0, //# 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 27, 27, 11, 11, 11, 11, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# }; //# static final byte[] CHAR_CLASSES_20 = { //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 0, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 11, //# 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 11, 11, 11, 11, 11, 11, 11, 11, //# 11, 11, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, //# 11, 11, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 
27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 5, //# }; //# static final byte[] CHAR_CLASSES_21 = { //# 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 5, //# }; //# static final byte[] CHAR_CLASSES_22 = { //# 5, //# }; //# static final byte[] CHAR_CLASSES_23 = { //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, //# }; //# static final byte[] CHAR_CLASSES_24 = { //# 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, //# 26, 26, 26, 26, 26, 4, 4, 4, 4, 0, 0, 0, 0, 0, 26, 26, //# }; //# static final byte[] CHAR_CLASSES_25 = { //# 5, 5, 8, 5, 5, 5, 6, 5, 5, 5, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 8, 8, 6, 6, 8, 27, 27, 27, 27, //# }; //# static final byte[] CHAR_CLASSES_26 = { //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 23, 23, 23, 23, //# }; //# static final byte[] CHAR_CLASSES_27 = { //# 5, //# }; //# static final byte[] CHAR_CLASSES_28 = { //# 5, //# }; //# static final byte[] CHAR_CLASSES_29 = { //# 18, //# }; //# static final byte[] CHAR_CLASSES_30 = { //# 18, 18, //# }; //# static final byte[] CHAR_CLASSES_31 = { //# 18, 18, //# }; //# static final byte[] CHAR_CLASSES_32 = { //# 18, 17, //# }; //# static final byte[] CHAR_CLASSES_33 = { //# 17, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, //# }; //# static final byte[] CHAR_CLASSES_34 = { //# 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 0, //# 0, 0, 0, 0, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 24, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 0, 5, 5, 5, 5, 5, 0, 5, 0, 5, 5, 0, 5, 5, 0, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, //# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, //# 5, 5, 5, 5, //# }; //# } //#endif
3e11c76f1636a736f20be65bebea970cf5c1cfa3
7,662
java
Java
ServerTools/src/me/reecepbcups/events/CMDAlias.java
Wesley51/ServerTools-MC
54875bb0f31ffb2f05d5d843ee30ced60e57808c
[ "Apache-2.0" ]
null
null
null
ServerTools/src/me/reecepbcups/events/CMDAlias.java
Wesley51/ServerTools-MC
54875bb0f31ffb2f05d5d843ee30ced60e57808c
[ "Apache-2.0" ]
null
null
null
ServerTools/src/me/reecepbcups/events/CMDAlias.java
Wesley51/ServerTools-MC
54875bb0f31ffb2f05d5d843ee30ced60e57808c
[ "Apache-2.0" ]
null
null
null
34.669683
129
0.674367
7,505
package me.reecepbcups.events; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.bukkit.Bukkit; import org.bukkit.Location; import org.bukkit.configuration.ConfigurationSection; import org.bukkit.entity.Player; import org.bukkit.event.EventHandler; import org.bukkit.event.EventPriority; import org.bukkit.event.Listener; import org.bukkit.event.player.PlayerCommandPreprocessEvent; import org.bukkit.scheduler.BukkitRunnable; import me.reecepbcups.tools.Main; import me.reecepbcups.utiltools.Util; public class CMDAlias implements Listener { public ConfigurationSection Alises; private static List<String> Disabled = Main.MAINCONFIG.getStringList("Misc.CMDAliases.disabled"); private static HashMap<String, List<String>> worlddisabled; // world: [cmd, 5] private static HashMap<String, HashMap<String, Integer>> preWorldCooldown = new HashMap<String, HashMap<String,Integer>>(); private boolean stopIfMoved = false; //public FileConfiguration config; private String aliasResult, userArguments, permission; public Main plugin; public CMDAlias(Main instance) { plugin = instance; if (plugin.EnabledInConfig("Misc.CMDAliases.Enabled")) { //config = plugin.getConfigFile("config.yml"); permission = Main.MAINCONFIG.getString("Misc.CMDAliases.Permission"); Alises = Main.MAINCONFIG.getConfigurationSection("Misc.CMDAliases.cmds"); //Disabled = Main.MAINCONFIG.getStringList("Misc.CMDAliases.disabled"); // every 15 mins it refreshes this Bukkit.getServer().getScheduler().scheduleSyncRepeatingTask(plugin, new Runnable() { public void run() { saveDisabledCommands(); } }, 0, 900*20L); // 15 minutes with initial delay of 0 seconds (run now) // on command run, get player location, wait X seconds, if they have not moved allowed command to be run. 
if(Main.MAINCONFIG.contains("Misc.CMDAliases.preCooldownCommands")) { stopIfMoved = Main.MAINCONFIG.getBoolean("Misc.CMDAliases.preCooldownCommands.stopIfMoved"); //Util.consoleMSG("&aloaded preCooldownCommands stopifMoved"); for(String world : Main.MAINCONFIG.getConfigurationSection("Misc.CMDAliases.preCooldownCommands").getKeys(false)) { HashMap<String, Integer> tempHoldCommands = new HashMap<String, Integer>(); if(!world.equalsIgnoreCase("stopIfMoved")) { for(String cmd : Main.MAINCONFIG.getStringList("Misc.CMDAliases.preCooldownCommands."+world)) { String command = cmd.split("%")[0]; Integer timeWait = Integer.valueOf(cmd.split("%")[1]); //Util.consoleMSG("&eloaded "+command+" for " + timeWait + " seconds for "+world); tempHoldCommands.put(command.toLowerCase(), timeWait); } preWorldCooldown.put(world, tempHoldCommands); } } } else { Util.consoleMSG("&c[!] Add the following into your config (Misc.CMDAliases)"); Util.consoleMSG(" preCooldownCommands:\r\n" + " stopIfMoved: true\r\n" + " warzone:\r\n" + " - spawn%5\r\n" + " - tpyes%5"); } Bukkit.getServer().getPluginManager().registerEvents(this, plugin); } } // saves all commands which should be disabled to the list. 
// Every 15 mins this is refreshed to make sure it doesnt unload public void saveDisabledCommands() { Util.consoleMSG("&aServerTools - Refreshed DisabledCommands to hash"); // new init here so it clears previous worlddisabled = new HashMap<String, List<String>>(); if(Main.MAINCONFIG.contains("Misc.CMDAliases.disabledWorlds")) { // gets worlds to disable specific cmds (Grabs exact copy bc sometimes is weird with this) for(String world : Main.MAINCONFIG.getConfigurationSection("Misc.CMDAliases.disabledWorlds").getKeys(false)) { if(Bukkit.getWorld(world) != null) { //Util.consoleMSG("World " + world + " Found for CMDAlias Disable"); List<String> l = new ArrayList<String>(); // if world is real, block all commands for(String blockCMD : Main.MAINCONFIG.getStringList("Misc.CMDAliases.disabledWorlds."+world)) { l.add(blockCMD.toLowerCase()); } worlddisabled.put(world, l); } else { Util.consoleMSG("&cWORLD: " + world + " in CMDDisabler is not valid!"); } } } else { Util.consoleMSG("&c[!] Add the following into your config (Misc.CMDAliases)\n disabledWorlds:\n WORLD:\n - cmd"); Util.consoleMSG("\r\n" + "\r\n" + ""); } } String command; @EventHandler(ignoreCancelled = false, priority = EventPriority.HIGHEST) public void onCommand(PlayerCommandPreprocessEvent e) { if (!(e.getMessage().length() > 1)) { return; } command = e.getMessage().substring(1).split(" ")[0].toLowerCase(); Player p = e.getPlayer(); String world = p.getLocation().getWorld().getName(); // if there are any keys if(worlddisabled.keySet().size() > 0) { // if player in world which there is a key for if(worlddisabled.keySet().contains(world)) { // if the cmd they ran is in the list disabled for that world if(worlddisabled.get(world).contains(command)) { // cancel if no bypass perm if(!e.getPlayer().hasPermission(permission)) { e.setCancelled(true); Util.coloredMessage(e.getPlayer(), Main.LANG("CMDALIAS_DENYWORLD").replace("%cmd%", command)); return; } else { Util.coloredMessage(e.getPlayer(), "&7&oBypassing 
command for disable world due to perm"); } } } } // DISABLED COMMANDS if(Disabled.contains(command)) { if(!e.getPlayer().hasPermission(permission)) { e.setCancelled(true); Util.coloredMessage(e.getPlayer(), Main.LANG("CMDALIAS_DISABLED").replace("%cmd%", command)); return; } } //Util.consoleMSG(preWorldCooldown.keySet().toString()); if(preWorldCooldown.containsKey(world)) { //Util.consoleMSG("&e"+p.getName()+" is in the world!"); // update command WITH spaces command = e.getMessage().substring(1); // removes / //Util.consoleMSG(command); // if that world has a command which is suppose to be on preCooldown //Util.consoleMSG(preWorldCooldown.get(world).toString()); if(preWorldCooldown.get(world).keySet().contains(command)) { if(p.hasPermission(permission)) { Util.coloredMessage(p, "&7&oBypassing PreCommand Cooldown due to being staff"); return; } else { e.setCancelled(true); } //Util.consoleMSG(p.getName()+" is in world " + world); final Location loc = p.getLocation(); int sec = preWorldCooldown.get(world).get(command); if(!p.hasPermission(permission)) { Util.coloredMessage(p, Main.LANG("CMDALIAS_DELAYED").replace("%cmd%", command).replace("%time%", sec+"")); new BukkitRunnable() { @Override public void run() { if(stopIfMoved) { if(loc.getBlockX() != p.getLocation().getBlockX() || loc.getBlockZ() != p.getLocation().getBlockZ()) { Util.coloredMessage(p, Main.LANG("CMDALIAS_DELAYED_MOVED")); return; } } // returns original command they wanted to run p.performCommand(e.getMessage().substring(1)); return; } }.runTaskLater(plugin, sec*20); // should get the INT value from the hashmap } } } if(Alises.contains(command)) { aliasResult = Main.MAINCONFIG.getString("Misc.CMDAliases.cmds."+command); userArguments = e.getMessage().substring(command.length() + 1) .replaceAll("%player%", e.getPlayer().getName()); e.setMessage(e.getMessage().substring(0, 1) + aliasResult + userArguments); } } }
3e11c8bb59854b240dd826f1499cc89688644728
956
java
Java
src/main/java/org/oidc/msg/SerializationException.java
NimbleIDM/javaOIDCMessage
db18f82326b2b0b5cdd30476561c1c4abd53aec8
[ "Apache-2.0" ]
null
null
null
src/main/java/org/oidc/msg/SerializationException.java
NimbleIDM/javaOIDCMessage
db18f82326b2b0b5cdd30476561c1c4abd53aec8
[ "Apache-2.0" ]
null
null
null
src/main/java/org/oidc/msg/SerializationException.java
NimbleIDM/javaOIDCMessage
db18f82326b2b0b5cdd30476561c1c4abd53aec8
[ "Apache-2.0" ]
null
null
null
30.83871
92
0.73954
7,506
/* * Copyright (C) 2018 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.oidc.msg; /** * An exception that is thrown when there is an issue with serialization of the Message type */ public class SerializationException extends Exception { public SerializationException(String message) { this(message, null); } public SerializationException(String message, Throwable cause) { super(message, cause); } }
3e11ca01558d5365c973e96d1b3de96847a80d26
3,349
java
Java
test/main/com/ocaml/lang/utils/OCamlPsiUtilsTest.java
QuentinRa/ocaml-plugin-intellij
8c578585d524171097c2c6e8d8397ff5fa9e2ed1
[ "MIT" ]
1
2022-03-05T11:28:48.000Z
2022-03-05T11:28:48.000Z
test/main/com/ocaml/lang/utils/OCamlPsiUtilsTest.java
QuentinRa/ocaml-plugin-intellij
8c578585d524171097c2c6e8d8397ff5fa9e2ed1
[ "MIT" ]
52
2022-02-01T11:08:25.000Z
2022-03-31T20:42:43.000Z
test/main/com/ocaml/lang/utils/OCamlPsiUtilsTest.java
QuentinRa/intellij-ocaml-plugin
8c578585d524171097c2c6e8d8397ff5fa9e2ed1
[ "MIT" ]
1
2022-03-25T03:10:55.000Z
2022-03-25T03:10:55.000Z
36.802198
115
0.703494
7,507
package com.ocaml.lang.utils; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.ocaml.OCamlBaseTest; import com.or.lang.OCamlTypes; import com.or.lang.core.psi.PsiLet; import org.intellij.lang.annotations.Language; import org.junit.Test; import java.util.Set; @SuppressWarnings("JUnit4AnnotatedMethodInJUnit3TestCase") public class OCamlPsiUtilsTest extends OCamlBaseTest { // skipMeaninglessPreviousSibling @Test public void testSkipPrevious() { PsiElement psiElement = configureCodeWithCaret("let x = 5\n (*something*)\n (* something *)(*caret*)"); PsiElement prev = OCamlPsiUtils.skipMeaninglessPreviousSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } @Test public void testSkipPreviousSpaces() { PsiElement psiElement = configureCodeWithCaret("let x = 5\n (*caret*)"); PsiElement prev = OCamlPsiUtils.skipMeaninglessPreviousSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } @Test public void testSkipPreviousComment() { PsiElement psiElement = configureCodeWithCaret("let x = 5\n(*something*)(*caret*)"); PsiElement prev = OCamlPsiUtils.skipMeaninglessPreviousSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } // skipMeaninglessPreviousSibling @Test public void testSkipNext() { PsiElement psiElement = configureCodeWithCaret("(*caret*) (*something*)\n (* something *)\nlet x = 5"); PsiElement prev = OCamlPsiUtils.skipMeaninglessNextSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } @Test public void testSkipNextSpaces() { PsiElement psiElement = configureCodeWithCaret("(*caret*) \nlet x = 5"); PsiElement prev = OCamlPsiUtils.skipMeaninglessNextSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } @Test public void testSkipNextComment() { PsiElement psiElement = configureCodeWithCaret("(*caret*)(*something*)\nlet x = 5"); PsiElement prev = OCamlPsiUtils.skipMeaninglessNextSibling(psiElement); assertInstanceOf(prev, PsiLet.class); } // getPsiFile @Test public void testGetPsiFile() { PsiFile file = 
myFixture.configureByText("editor.ml", ""); PsiFile psiFile = OCamlPsiUtils.getPsiFile(myFixture.getEditor()); assertNotNull(psiFile); assertSame(psiFile, file); } @Test // getNextMeaningfulSibling public void testGetNextMeaningfulSibling() { PsiElement psiElement = configureCodeWithCaret("(*caret*)( (*something*) )"); PsiElement rparen = OCamlPsiUtils.getNextMeaningfulSibling(psiElement, OCamlTypes.RPAREN); assertNotNull(rparen); assertEquals(OCamlTypes.RPAREN, rparen.getNode().getElementType()); assertTrue(OCamlPsiUtils.isNextMeaningfulNextSibling(psiElement, OCamlTypes.RPAREN)); } @Test // getPreviousMeaningfulSibling public void testGetPreviousMeaningfulSibling() { PsiElement psiElement = configureCodeWithCaret("( (*something*) )(*caret*)"); PsiElement lparen = OCamlPsiUtils.getPreviousMeaningfulSibling(psiElement, OCamlTypes.LPAREN); assertNotNull(lparen); assertEquals(OCamlTypes.LPAREN, lparen.getNode().getElementType()); } }
3e11caad40bd8ff10761d04e3915dbe3b6e4e048
1,075
java
Java
src/main/java/br/com/editora/casaDoCodigo/entidades/Autor.java
ludmilaaraujo/orange-talents-03-template-casa-do-codigo
2fb41d865fcc6cc45606c869fbdb32e4203d856d
[ "Apache-2.0" ]
null
null
null
src/main/java/br/com/editora/casaDoCodigo/entidades/Autor.java
ludmilaaraujo/orange-talents-03-template-casa-do-codigo
2fb41d865fcc6cc45606c869fbdb32e4203d856d
[ "Apache-2.0" ]
null
null
null
src/main/java/br/com/editora/casaDoCodigo/entidades/Autor.java
ludmilaaraujo/orange-talents-03-template-casa-do-codigo
2fb41d865fcc6cc45606c869fbdb32e4203d856d
[ "Apache-2.0" ]
null
null
null
21.5
57
0.616744
7,508
package br.com.editora.casaDoCodigo.entidades; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import java.time.LocalDateTime; @Entity public class Autor { @Id @GeneratedValue (strategy = GenerationType.IDENTITY) private Long id; private String nome; private String email; private String descricao; private LocalDateTime instante = LocalDateTime.now(); public Autor(String nome, String email, String descricao){ this.nome = nome; this.email = email; this.descricao = descricao; } @Deprecated public Autor(){ } @Override public String toString(){ return "Autor [nome=" + nome + ", email=" + email + ", " + "descricao= " + descricao +"]"; } public String getNome() { return nome; } public String getEmail() { return email; } public String getDescricao() { return descricao; } }
3e11cb1dcaed67fb650c8e65d6c5ebacdd6c986b
4,429
java
Java
main/java/com/xenonteam/xenonlib/util/java/StorageHelper.java
XenonTeam/XenonLib
4f7c9840a51fd99e3ed1dc2e03d57759be53b08f
[ "Apache-2.0" ]
null
null
null
main/java/com/xenonteam/xenonlib/util/java/StorageHelper.java
XenonTeam/XenonLib
4f7c9840a51fd99e3ed1dc2e03d57759be53b08f
[ "Apache-2.0" ]
null
null
null
main/java/com/xenonteam/xenonlib/util/java/StorageHelper.java
XenonTeam/XenonLib
4f7c9840a51fd99e3ed1dc2e03d57759be53b08f
[ "Apache-2.0" ]
null
null
null
21.293269
105
0.637616
7,509
/** * */ package com.xenonteam.xenonlib.util.java; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.ArrayList; /** * @author tim4242 * @author philipas * */ public class StorageHelper { /** * Serializes an {@link java.lang.Object Object} to a byte[] * * @param obj * The {@link java.lang.Object Object} * @return A byte[] representation of the {@link java.lang.Object Object} * @throws IOException */ public static byte[] serialize(Object obj) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); ObjectOutputStream os = new ObjectOutputStream(out); os.writeObject(obj); byte[] b = out.toByteArray(); out.close(); os.close(); return b; } /** * Deserializes {@link java.lang.Object Objects} serialized by * {@link com.xenonteam.xenonlib.util.java.StorageHelper#serialize(java.lang.Object) * serialize(Object)} * * @param data * The Object as a byte[] * @return an * @throws IOException * @throws ClassNotFoundException */ public static Object deserialize(byte[] data) throws IOException, ClassNotFoundException { ByteArrayInputStream in = new ByteArrayInputStream(data); ObjectInputStream is = new ObjectInputStream(in); Object o = is.readObject(); in.close(); is.close(); return o; } /** * Writes an {@link java.lang.Object Object} to the specified * {@link java.io.File File} * * @param obj * The Object to write * @param f * The {@link java.io.File File} * @throws IOException */ public static void writeSerializedObject(Object obj, File f) throws IOException { File temp = f; if(!f.toString().contains(".")) { temp = new File(f + ".jobj"); } FileOutputStream out = new FileOutputStream(temp); byte[] ser = serialize(obj); byte[] ba = new byte[ser.length + 1]; ba[0] = new Integer(ser.length).byteValue(); for (int i = 0; i < ser.length; i++) { 
ba[i + 1] = ser[i]; } out.write(ba); out.close(); } /** * Reads the {@link java.io.File File} created by * {@link com.xenonteam.xenonlib.util.java.StorageHelper#writeSerialized(java.lang.Object, java.io.File) * writeSerialized()} * * @param f * The {@link java.io.File File} * @return * @throws ClassNotFoundException * @throws IOException */ public static Object readSearializedObject(File f) throws ClassNotFoundException, IOException { File temp = f; if(!f.toString().contains(".")) { temp = new File(f + ".jobj"); } byte[] lenB = new byte[1]; FileInputStream in = new FileInputStream(temp); in.read(lenB); int len = new Byte(lenB[0]).intValue(); byte[] obj = new byte[len]; in.read(obj); in.close(); return deserialize(obj); } public static Object[] readSearializedObjects(File file) { File temp = file; ArrayList<Object> object = new ArrayList<Object>(); if(temp.isDirectory()) { for (int i = 0; i < temp.list().length; i++) { File t = temp.listFiles()[i]; try { object.add(readSearializedObject(t)); } catch(ClassNotFoundException | IOException e) { e.printStackTrace(); } } } else { System.out.println("invalid file this is not a file directory"); } return object.toArray(); } public static void writeSerializedObjects(File f, String[] names, Object... obj) { if(names.length == obj.length) { for (int i = 0; i < obj.length; i++) { try { if(!(new File(f.getPath()).isDirectory())) new File(f.getPath()).mkdirs(); if(!(new File(f.getPath() + "/" + names[i] + ".jobj").exists())) new File(f.getPath() + "/" + names[i] + ".jobj").createNewFile(); writeSerializedObject(obj[i], new File(f.getPath() + "/" + i + ".jobj")); } catch(IOException e) { e.printStackTrace(); } } } else { System.out.println("your names and object are not the same lenght"); } } public static void writeSerializedObjects(File f, Object... obj) { String[] names = new String[obj.length]; for(int i = 0; i < obj.length; i++) names[i] = obj[i].getClass().getSimpleName(); writeSerializedObjects(f, names, obj); } }
3e11cbb8ba5422c0e219d7e8ccdfd8a8210b90f4
4,938
java
Java
AF_Game_Final_Debug/src/CutsceneReader.java
cbraunsch-dev/AceFighter
70e88013d67a95a5c2fafb25bb70aed02e0f42d1
[ "MIT" ]
null
null
null
AF_Game_Final_Debug/src/CutsceneReader.java
cbraunsch-dev/AceFighter
70e88013d67a95a5c2fafb25bb70aed02e0f42d1
[ "MIT" ]
null
null
null
AF_Game_Final_Debug/src/CutsceneReader.java
cbraunsch-dev/AceFighter
70e88013d67a95a5c2fafb25bb70aed02e0f42d1
[ "MIT" ]
null
null
null
30.670807
134
0.699676
7,510
/** * @(#)AceFighter1_8 -> Cutscene Engine -> Cutscene Reader * * The Cutscene Reader reads the text file containing all the cutscene data. * It then formats that data so that the Cutscene Engine can place it into its * data structures and be ready to execute the cutscenes. * * @author Chris Braunschweiler * @version 1.00 March 20, 2008 */ import java.io.*; import java.util.*; class CutsceneReader { //Variables for reading the script private String filename; //the name of the level script file InputStream scriptStream; BufferedReader br; //Variables for storing the Cutscene script data private ArrayList<String> cutsceneEvents; //the list of Cutscene Events. A cutscene event is formatted //as follows: KeyPress_A Dialog. The first word of the String is the condition //to be met and the second word is the cutscene event that is executed when that //condition is met. private ArrayList<Command> cutsceneCommands; //the list of CutsceneCommands. A command consists of: PlayerName, Action, Duration. private ArrayList<DialogEntity> cutsceneDialogs; //the list of Cutscene dialogs that are gonna be executed. A DialogEvent consits of: //the name of the speaker, the number of lines of dialog to be spoken, and the //list of Strings that represent the lines of dialog that are going to be spoken. 
//Constructor public CutsceneReader(String filename) { this.filename = filename; if(filename==null) System.out.println("CutsceneFile name is null"); //try //{ //try //{ scriptStream = this.getClass().getResourceAsStream(filename); //scriptStream = new FileInputStream(new File(filename)); //scriptStream = this.getClass().getResourceAsStream(filename); if(scriptStream==null) { System.out.println("Script Stream is null"); System.out.println("Desired File might not exist."); } br = new BufferedReader(new InputStreamReader(scriptStream)); cutsceneEvents = new ArrayList<String>(); cutsceneCommands = new ArrayList<Command>(); cutsceneDialogs = new ArrayList<DialogEntity>(); /*} catch(IOException e) { System.out.println(e.getMessage()); }*/ } //Accessor Methods //Returns the list of Cutscene Events. public ArrayList<String> getCutsceneEvents() { return cutsceneEvents; } //Returns the list of Cutscene Commands. public ArrayList<Command> getCutsceneCommands() { return cutsceneCommands; } //Returns the list of Cutscene Dialogs. 
public ArrayList<DialogEntity> getCutsceneDialogs() { return cutsceneDialogs; } //Methods for reading the cutscene script file public void readCutsceneData() { String nextLine = ""; try { nextLine = br.readLine(); String numCutsceneEventsString = nextLine; Integer integer = Integer.parseInt(numCutsceneEventsString); int numEvents = integer.intValue(); for(int i = 0; i<numEvents; i++) //read all events of this Cutscene { nextLine = br.readLine(); //System.out.println("Line: " + nextLine); String cutsceneEvent = nextLine; Scanner scanner = new Scanner(cutsceneEvent); String condition = scanner.next(); String event = scanner.next(); if(event.equals("Command")) //if event is a Command (such as: Player3 Fire 500) readCommandData(); if(event.equals("Dialog")) //if event is a Dialog readDialogData(); cutsceneEvents.add(cutsceneEvent); } } catch(IOException e) { System.out.println("Error in readCutsceneData() in CutsceneReader: IOException"); } } public void readCommandData() { //System.out.println("**readCommand**"); String nextLine = ""; try { nextLine = br.readLine(); //System.out.println("**Line: " + nextLine); String actionCommand = nextLine; Scanner scanner = new Scanner(nextLine); String playerName = scanner.next(); String action = scanner.next(); String durationString = scanner.next(); Integer longNumber = Integer.parseInt(durationString); int duration = longNumber.intValue(); cutsceneCommands.add(new Command(playerName, action, duration)); } catch(IOException e) { System.out.println("Error in readCommandData() in CutsceneReader: IOException"); } } public void readDialogData() { //System.out.println("##readDialog##"); String nextLine = ""; try { nextLine = br.readLine(); //System.out.println("##Line: " + nextLine); String playerName = nextLine; nextLine = br.readLine(); Integer integer = Integer.parseInt(nextLine); int numLines = integer.intValue(); ArrayList<String> dialogLines = new ArrayList<String>(); for(int i = 0; i<numLines; i++) { nextLine = br.readLine(); 
dialogLines.add(nextLine); } cutsceneDialogs.add(new DialogEntity(playerName, numLines, dialogLines)); } catch(IOException e) { System.out.println("Error in readDialogData() in CutsceneReader: IOException"); } } }
3e11cc96cd5162b0c14d7030807084247c25f1b8
89
java
Java
app/src/main/java/com/example/tufengyi/manlife/bean/Empty.java
StudyNoteOfTu/manlife
edc6183a164e9a23ebb9c09d75187e7bb4e24d8c
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/example/tufengyi/manlife/bean/Empty.java
StudyNoteOfTu/manlife
edc6183a164e9a23ebb9c09d75187e7bb4e24d8c
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/example/tufengyi/manlife/bean/Empty.java
StudyNoteOfTu/manlife
edc6183a164e9a23ebb9c09d75187e7bb4e24d8c
[ "Apache-2.0" ]
null
null
null
14.833333
42
0.707865
7,511
package com.example.tufengyi.manlife.bean; public class Empty { public Empty(){}; }
3e11cce344be472a7ef8e45e40333248bcaf80d7
2,430
java
Java
app/src/main/java/com/dev/kiko/myinvoice/model/invoice/InvoiceTable.java
goinhasf/MyInvoiceApp
a71514a3ed2e95fd860c9b625e846773064eaf63
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/dev/kiko/myinvoice/model/invoice/InvoiceTable.java
goinhasf/MyInvoiceApp
a71514a3ed2e95fd860c9b625e846773064eaf63
[ "Apache-2.0" ]
null
null
null
app/src/main/java/com/dev/kiko/myinvoice/model/invoice/InvoiceTable.java
goinhasf/MyInvoiceApp
a71514a3ed2e95fd860c9b625e846773064eaf63
[ "Apache-2.0" ]
null
null
null
23.823529
87
0.584774
7,512
package com.dev.kiko.myinvoice.model.invoice; /** * Created by kikogoinhas on 17/12/2017. */ import java.util.ArrayList; /** * This class will be used to store data about the items items included in the invoice. */ public class InvoiceTable { // A list containing the items in the invoice private ArrayList<InvoiceItem> items; /** * Default Constructor * */ public InvoiceTable() { items = new ArrayList<>(); } /** * Adds an item to the table * @param item The item to be added * @return Returns true if added successfully and false if otherwise. */ public boolean addItem(InvoiceItem item) { return items.add(item); } /** * Deletes an item from the invoice table * @param item The item to be deleted * @return Returns true if deleted successfully and false if otherwise. */ public boolean deleteItem(InvoiceItem item) { return items.remove(item); } /** * Returns the item at the specified position * @param index The index of the item in the list. * @return */ public InvoiceItem getItem(int index) { return items.get(index); } /** * Returns the number of items in the table. * @return Returns the number of items in the table. */ public int getItemCount() { return items.size(); } /** * This class represents the items in the invoice table */ private class InvoiceItem { // The number of items private int quantity; // The description of the item private String description; // The price of a single item private double unitPrice; /** * * @param quantity The number of items of this description * @param description The description of the item * @param unitPrice The price of each item */ public InvoiceItem(int quantity, String description, double unitPrice) { this.quantity = quantity; this.description = description; this.unitPrice = unitPrice; } /** * Returns the total price of the invoice item * @return returns the total price of the invoice item. */ public double total(){ return quantity*unitPrice; } } }
3e11ce09fd545c31c1828ecdce90b2f329bde065
220
java
Java
andhow-testing/andhow-annotation-processor-test-harness/src/test/resources/org/yarnandtail/compile/Simple.java
jnglman/andhow
309763a7b9ba6c62873adf76ca603e177d871b1a
[ "Apache-2.0" ]
28
2017-12-13T04:29:42.000Z
2022-03-09T01:31:58.000Z
andhow-testing/andhow-annotation-processor-test-harness/src/test/resources/org/yarnandtail/compile/Simple.java
jnglman/andhow
309763a7b9ba6c62873adf76ca603e177d871b1a
[ "Apache-2.0" ]
524
2016-12-08T19:41:04.000Z
2022-03-08T16:05:05.000Z
andhow-testing/andhow-annotation-processor-test-harness/src/test/resources/org/yarnandtail/compile/Simple.java
jnglman/andhow
309763a7b9ba6c62873adf76ca603e177d871b1a
[ "Apache-2.0" ]
41
2018-01-24T23:46:57.000Z
2021-08-20T13:55:29.000Z
16.923077
91
0.690909
7,513
package org.yarnandtail.compile; /** * Very simple class, copied to the test resource path for loading as a resource for tests. */ public class Simple { public Simple() {} public int takeFive() { return 5; } }
3e11ce6e6c23d714e38517e5be1b7f9b559c8f15
98
java
Java
src/engine/space/package-info.java
Benjamindavid03/2DPlatformer
ef9f53670d05325ca8a877e7a7e54a1b0d5724a3
[ "BSD-2-Clause" ]
22
2015-07-09T06:20:58.000Z
2021-07-03T15:32:55.000Z
src/engine/space/package-info.java
Benjamindavid03/2DPlatformer
ef9f53670d05325ca8a877e7a7e54a1b0d5724a3
[ "BSD-2-Clause" ]
5
2015-07-02T15:03:11.000Z
2015-08-14T09:06:25.000Z
src/engine/space/package-info.java
isabella232/2DPlatformer
89130de8673f12266d7d27add99e3e4d76ae6b2c
[ "BSD-2-Clause" ]
14
2015-01-26T03:21:39.000Z
2022-03-16T16:38:25.000Z
24.5
68
0.77551
7,514
/** * Classes for representing and manipulating multidimensional space. */ package engine.space;
3e11cf98cffc01dbaa2d26b8255de1822d31eb2f
2,435
java
Java
src/main/java/chae4ek/_5_MTDF.java
Chae4ek/search-alg-kekcia
f74aa96197db9eed32c2eaf204640e445427fe29
[ "MIT" ]
null
null
null
src/main/java/chae4ek/_5_MTDF.java
Chae4ek/search-alg-kekcia
f74aa96197db9eed32c2eaf204640e445427fe29
[ "MIT" ]
null
null
null
src/main/java/chae4ek/_5_MTDF.java
Chae4ek/search-alg-kekcia
f74aa96197db9eed32c2eaf204640e445427fe29
[ "MIT" ]
null
null
null
28.988095
93
0.599589
7,515
package chae4ek;

import chae4ek.engine.GameEngineStuff;
import java.util.Deque;

/**
 * MTD(f) game-tree search wrapped in iterative deepening.
 * GameNode, Move, MIN_VALUE/MAX_VALUE and the board helpers
 * (getLegalMovesFor, isTerminalNode, makeMove, undoMove) come from
 * GameEngineStuff — assumed; confirm against that class.
 */
public abstract class _5_MTDF extends GameEngineStuff {

  /*------------ Bot settings ------------*/

  // Effectively "unbounded" depth; the time limit below is the real cutoff.
  private static final int MAX_DEPTH = 271828183;

  /** Maximum time allowed per move (milliseconds). */
  private static final long MAX_TIME = 5000;

  private GameNode currNode; // current game state

  /*------------ Computing the bot's next move ------------*/

  /** True once more than MAX_TIME ms have elapsed since startTime. */
  private static boolean timesUp(final long startTime) {
    return System.currentTimeMillis() - startTime > MAX_TIME;
  }

  /**
   * Runs MTD(f) at increasing depths 1..depth, feeding each result back in
   * as the next first guess; stops early when the time budget is exhausted.
   */
  public int iterativeDeepening(final long startTime, int firstGuess, final int depth) {
    for (int d = 1; d <= depth; ++d) {
      firstGuess = MTDF(firstGuess, d);
      if (timesUp(startTime)) break;
    }
    return firstGuess;
  }

  /**
   * MTD(f): repeated zero-window searches that narrow [lowerBound, upperBound]
   * around the true minimax value until the bounds meet.
   */
  private int MTDF(final int firstGuess, final int depth) {
    int value = firstGuess;
    int lowerBound = MIN_VALUE;
    int upperBound = MAX_VALUE;
    int beta;
    do {
      // Zero-window probe at (beta-1, beta).
      beta = Math.max(value, lowerBound + 1);
      value = searchAlgorithmWithTT(beta - 1, beta, depth);
      if (value < beta) upperBound = value;
      else lowerBound = value;
    } while (lowerBound < upperBound);
    return value;
  }

  /** @return the best score for the maximizing player */
  int searchAlgorithmWithTT(final int alfa, final int beta, final int depth) {
    return -negamaxTT(false, -beta, -alfa, depth); // or any other algorithm
  }

  /** Negamax with transposition table; supplied by the concrete bot. */
  abstract int negamaxTT(boolean isMyMove, int alfa, int beta, int depth);

  /*------------ Search entry point ------------*/

  /**
   * Tries every legal move, searching each with iterative deepening, and
   * returns the move with the highest score. Note the time budget is applied
   * per move here (startTime is taken before the loop but checked inside
   * iterativeDeepening) — TODO confirm this is the intended budget.
   */
  Move getNextBestMove() {
    final boolean isMyMove = true;
    final Deque<Move> allMoves = getLegalMovesFor(isMyMove);
    if (isTerminalNode(currNode)) throw new RuntimeException("Ходов нет, игра окончена");
    int bestValue = MIN_VALUE;
    Move bestMove = allMoves.getFirst();
    final long startTime = System.currentTimeMillis();
    for (final Move move : allMoves) {
      makeMove(move);
      final int value = iterativeDeepening(startTime, 0, MAX_DEPTH);
      undoMove(move);
      if (value > bestValue) {
        bestValue = value;
        bestMove = move;
      }
    }
    return bestMove;
  }
}
3e11d078abfaab4c2320f9c541a45eb48ffab57c
4,272
java
Java
zm-mailbox/soap/src/java/com/zimbra/soap/account/type/SMIMEPublicCertsStoreSpec.java
hernad/zimbra9
cf61ffa40d9600ab255ef4516ca25029fff6603b
[ "Apache-2.0" ]
null
null
null
zm-mailbox/soap/src/java/com/zimbra/soap/account/type/SMIMEPublicCertsStoreSpec.java
hernad/zimbra9
cf61ffa40d9600ab255ef4516ca25029fff6603b
[ "Apache-2.0" ]
null
null
null
zm-mailbox/soap/src/java/com/zimbra/soap/account/type/SMIMEPublicCertsStoreSpec.java
hernad/zimbra9
cf61ffa40d9600ab255ef4516ca25029fff6603b
[ "Apache-2.0" ]
null
null
null
36.512821
114
0.690075
7,516
/* * ***** BEGIN LICENSE BLOCK ***** * Zimbra Collaboration Suite Server * Copyright (C) 2011, 2012, 2013, 2014, 2016 Synacor, Inc. * * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software Foundation, * version 2 of the License. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program. * If not, see <https://www.gnu.org/licenses/>. * ***** END LICENSE BLOCK ***** */ package com.zimbra.soap.account.type; import java.util.List; import com.google.common.base.Joiner; import com.google.common.base.MoreObjects; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlValue; import com.zimbra.common.soap.AccountConstants; import com.zimbra.soap.type.SourceLookupOpt; import com.zimbra.soap.type.StoreLookupOpt; @XmlAccessorType(XmlAccessType.NONE) public class SMIMEPublicCertsStoreSpec { private static Joiner COMMA_JOINER = Joiner.on(","); /** * @zm-api-field-description Lookup option related to stores. * <ul> * <li> <b>ANY</b> (default) : While iterating through stores, stop if any certs are found in a store and just * return those certs - remaining stores will not be attempted. * <li> <b>ALL</b>: Always iterate through all specified stores. * </ul> */ @XmlAttribute(name=AccountConstants.A_SMIME_STORE_LOOKUP_OPT /* storeLookupOpt */, required=false) private StoreLookupOpt storeLookupOpt; /** * @zm-api-field-description Lookup option related to sources configured for stores. 
* <ul> * <li> <b>ANY</b> : While iterating through multiple sources configured for a store, stop if any certificates * are found in one source - remaining configured sources will not be attempted. * <li> <b>ALL</b> (default) : Always iterate through all configured sources. * </ul> * Note: this only applies to the <b>LDAP</b> store. */ @XmlAttribute(name=AccountConstants.A_SMIME_SOURCE_LOOKUP_OPT /* sourceLookupOpt */, required=false) private SourceLookupOpt sourceLookupOpt; public SMIMEPublicCertsStoreSpec() { } public void setStoreLookupOpt(StoreLookupOpt storeLookupOpt) { this.storeLookupOpt = storeLookupOpt; } public void setSourceLookupOpt(SourceLookupOpt sourceLookupOpt) { this.sourceLookupOpt = sourceLookupOpt; } public StoreLookupOpt getStoreLookupOpt() { return storeLookupOpt; } public SourceLookupOpt getSourceLookupOpt() { return sourceLookupOpt; } private List<String> storeTypes = Lists.newArrayList(); /** * @zm-api-field-description Comma separated list of store types * <br /> * Valid store types: * <ol> * <li><b>CONTACT</b> - contacts * <li><b>GAL</b> - Global Address List (internal and external) * <li><b>LDAP</b> - external LDAP (see GetSMIMEConfig and ModifySMIMEConfig) * </ol> */ @XmlValue public String getStoreTypes() { return COMMA_JOINER.join(storeTypes); } public void addStoreType(String storeType) { this.storeTypes.add(storeType); } public void addStoreTypes(Iterable<String> storeTypes) { if (storeTypes != null) { Iterables.addAll(this.storeTypes, storeTypes); } } public MoreObjects.ToStringHelper addToStringInfo( MoreObjects.ToStringHelper helper) { return helper .add("storeLookupOpt", storeLookupOpt) .add("sourceLookupOpt", sourceLookupOpt) .add("storeTypes", storeTypes); } @Override public String toString() { return addToStringInfo(MoreObjects.toStringHelper(this)) .toString(); } }
3e11d24bb8f1cd17d6357c7ff2adecb0d3421ec6
5,762
java
Java
Troll Wars/src/nl/knokko/players/PlayerGothrok.java
knokko/Troll-Wars
233151bce34dc215afe7bb62fc0d435801320b95
[ "MIT" ]
null
null
null
Troll Wars/src/nl/knokko/players/PlayerGothrok.java
knokko/Troll-Wars
233151bce34dc215afe7bb62fc0d435801320b95
[ "MIT" ]
null
null
null
Troll Wars/src/nl/knokko/players/PlayerGothrok.java
knokko/Troll-Wars
233151bce34dc215afe7bb62fc0d435801320b95
[ "MIT" ]
null
null
null
29.854922
197
0.730996
7,517
/*******************************************************************************
 * The MIT License
 *
 * Copyright (c) 2019 knokko
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *******************************************************************************/
package nl.knokko.players;

import nl.knokko.area.creature.AreaCreature;
import nl.knokko.area.creature.AreaPlayer;
import nl.knokko.battle.element.ElementalStatistics;
import nl.knokko.battle.element.SimpleElementStats;
import nl.knokko.inventory.InventoryType;
import nl.knokko.items.Item;
import nl.knokko.items.Items;
import nl.knokko.main.Game;
import nl.knokko.model.ModelOwner;
import nl.knokko.model.ModelPart;
import nl.knokko.model.body.BodyTroll;
import nl.knokko.players.moves.PlayerMovesGothrok;
import nl.knokko.util.bits.BitInput;
import nl.knokko.util.bits.BitOutput;

/**
 * The playable character Gothrok (a troll). Gothrok is always available, can
 * equip anything, and keeps one extra piece of persistent state: which hand
 * is currently in use (saved/loaded as a single boolean).
 */
public class PlayerGothrok extends Player {

	// Which hand Gothrok currently uses; persisted by save()/load().
	private boolean leftHand;

	public PlayerGothrok() {}

	/** Gothrok is always part of the available party. */
	@Override
	public boolean alwaysAvailable() {
		return true;
	}

	/** Gothrok may equip items of any inventory type. */
	@Override
	public boolean canEquip(InventoryType type) {
		return true;
	}

	/** Gothrok may equip any individual item. */
	@Override
	public boolean canEquip(Item item) {
		return true;
	}

	/**
	 * One-time setup for a brand-new game: starting weapon, an inventory
	 * item, and Gothrok's base stats (fields inherited from Player).
	 */
	@Override
	public void initialiseExtraFirstGame() {
		equipment.equipRightWeapon(Items.POG_BONE);
		Game.getPlayerInventory().addItem(Items.SORG_SPEAR);
		// Make the starting weapon visible on the area model immediately.
		refreshRightWeapon(getAreaPlayer());
		turnSpeed = 10;
		maxHealth = 1000;
		maxMana = 500;
		maxFocus = 300;
		maxEnergy = 500;
		maxRage = 500;
		strength = 100;
		spirit = 100;
	}

	/** Fetches Gothrok's avatar in the currently loaded area. */
	@Override
	public AreaPlayer getAreaPlayer(){
		return Game.getArea().getArea().getPlayer(true);
	}

	// The refresh* methods below all follow the same pattern: if the slot is
	// equipped, build a model for the item and attach it to the troll body;
	// otherwise detach by passing null.

	@Override
	public void refreshHelmet(ModelOwner ap){
		if(equipment.getHelmet() != null)
			BodyTroll.Helper.setHelmet(ap, equipment.getHelmet().createModel(b(ap)));
		else
			BodyTroll.Helper.setHelmet(ap, null);
	}

	@Override
	public void refreshLeftWeapon(ModelOwner ap){
		if(equipment.getLeftWeapon() != null)
			BodyTroll.Helper.setLeftWeapon(ap, equipment.getLeftWeapon().createModel(b(ap), true));
		else
			BodyTroll.Helper.setLeftWeapon(ap, null);
	}

	@Override
	public void refreshRightWeapon(ModelOwner ap){
		if(equipment.getRightWeapon() != null)
			BodyTroll.Helper.setRightWeapon(ap, equipment.getRightWeapon().createModel(b(ap), true));
		else
			BodyTroll.Helper.setRightWeapon(ap, null);
	}

	@Override
	public void refreshLeftGlobe(ModelOwner ap){
		if(equipment.getLeftGlobe() != null)
			BodyTroll.Helper.setLeftGlobe(ap, equipment.getLeftGlobe().createModelLeft(b(ap)));
		else
			BodyTroll.Helper.setLeftGlobe(ap, null);
	}

	@Override
	public void refreshRightGlobe(ModelOwner ap){
		if(equipment.getRightGlobe() != null)
			BodyTroll.Helper.setRightGlobe(ap, equipment.getRightGlobe().createModelRight(b(ap)));
		else
			BodyTroll.Helper.setRightGlobe(ap, null);
	}

	@Override
	public void refreshChestplate(ModelOwner ap){
		if(equipment.getChestplate() != null)
			BodyTroll.Helper.setChestplate(ap, equipment.getChestplate().createModelBelly(b(ap)), equipment.getChestplate().createModelUpperArm(b(ap)), equipment.getChestplate().createModelUnderArm(b(ap)));
		else
			// Cast selects the ModelPart overload for the all-null "detach" call.
			BodyTroll.Helper.setChestplate(ap, (ModelPart) null, null, null);
	}

	@Override
	public void refreshPants(ModelOwner ap){
		if(equipment.getPants() != null)
			BodyTroll.Helper.setPants(ap, equipment.getPants().createModelUpperLeg(b(ap)), equipment.getPants().createModelUnderLeg(b(ap)));
		else
			// Cast selects the ModelPart overload for the all-null "detach" call.
			BodyTroll.Helper.setPants(ap, (ModelPart) null, null);
	}

	@Override
	public void refreshLeftShoe(ModelOwner ap){
		if(equipment.getLeftShoe() != null)
			BodyTroll.Helper.setLeftShoe(ap, equipment.getLeftShoe().createModelLeft(b(ap), b(ap)));
		else
			BodyTroll.Helper.setLeftShoe(ap, null);
	}

	@Override
	public void refreshRightShoe(ModelOwner ap){
		if(equipment.getRightShoe() != null)
			BodyTroll.Helper.setRightShoe(ap, equipment.getRightShoe().createModelRight(b(ap), b(ap)));
		else
			BodyTroll.Helper.setRightShoe(ap, null);
	}

	/** Gothrok uses the troll elemental profile. */
	@Override
	public ElementalStatistics getNaturalElementStats() {
		return SimpleElementStats.TROLL;
	}

	/** Saves base player state, then the extra leftHand flag. */
	@Override
	public BitOutput save(BitOutput buffer){
		buffer = super.save(buffer);
		buffer.addBoolean(leftHand);
		return buffer;
	}

	/** Loads base player state, then the extra leftHand flag (must mirror save()). */
	@Override
	public void load(BitInput buffer){
		super.load(buffer);
		leftHand = buffer.readBoolean();
	}

	@Override
	protected PlayerMovesGothrok createMoves() {
		return new PlayerMovesGothrok();
	}

	/** Toggles which hand is in use. */
	public void swapHands(){
		leftHand = !leftHand;
	}

	public boolean useLeftHand(){
		return leftHand;
	}

	public boolean useRightHand(){
		return !leftHand;
	}

	// Picks the area or battle body model depending on where the owner lives.
	private static BodyTroll b(ModelOwner owner){
		return owner instanceof AreaCreature ? BodyTroll.Models.AREA_GOTHROK : BodyTroll.Models.BATTLE_GOTHROK;
	}
}
1,105
java
Java
Minecraft/build/tmp/recompileMc/sources/net/minecraft/client/audio/SoundRegistry.java
QinxiWang/Mincraft-Agent-Learn-to-Fight-Zombies-and-Slimes
019deb0b37674289d077fb69800ee97e5fdb47d1
[ "MIT" ]
22
2021-01-10T20:58:45.000Z
2021-12-19T18:11:35.000Z
Minecraft/build/tmp/recompileMc/sources/net/minecraft/client/audio/SoundRegistry.java
QinxiWang/Mincraft-Agent-Learn-to-Fight-Zombies-and-Slimes
019deb0b37674289d077fb69800ee97e5fdb47d1
[ "MIT" ]
3
2021-01-10T20:59:19.000Z
2021-01-13T14:00:58.000Z
Minecraft/build/tmp/recompileMc/sources/net/minecraft/client/audio/SoundRegistry.java
QinxiWang/Mincraft-Agent-Learn-to-Fight-Zombies-and-Slimes
019deb0b37674289d077fb69800ee97e5fdb47d1
[ "MIT" ]
4
2020-12-20T02:10:44.000Z
2022-01-25T19:38:53.000Z
29.864865
87
0.729412
7,518
package net.minecraft.client.audio; import com.google.common.collect.Maps; import java.util.Map; import net.minecraft.util.ResourceLocation; import net.minecraft.util.registry.RegistrySimple; import net.minecraftforge.fml.relauncher.Side; import net.minecraftforge.fml.relauncher.SideOnly; @SideOnly(Side.CLIENT) public class SoundRegistry extends RegistrySimple<ResourceLocation, SoundEventAccessor> { /** Contains all registered sound */ private Map<ResourceLocation, SoundEventAccessor> soundRegistry; /** * Creates the Map we will use to map keys to their registered values. */ protected Map<ResourceLocation, SoundEventAccessor> createUnderlyingMap() { this.soundRegistry = Maps.<ResourceLocation, SoundEventAccessor>newHashMap(); return this.soundRegistry; } public void add(SoundEventAccessor accessor) { this.putObject(accessor.getLocation(), accessor); } /** * Reset the underlying sound map (Called on resource manager reload) */ public void clearMap() { this.soundRegistry.clear(); } }
3e11d317ca8c361aa8ab6ee8e59b4661aeb7bdcf
987
java
Java
bedrock/bedrock-v291/src/main/java/com/nukkitx/protocol/bedrock/v291/serializer/MapInfoRequestSerializer_v291.java
lt-name/Protocol
587fe15c3da158729badb1a93819f0317860ffa5
[ "Apache-2.0" ]
149
2020-07-03T10:52:51.000Z
2022-03-24T16:41:36.000Z
bedrock/bedrock-v291/src/main/java/com/nukkitx/protocol/bedrock/v291/serializer/MapInfoRequestSerializer_v291.java
lt-name/Protocol
587fe15c3da158729badb1a93819f0317860ffa5
[ "Apache-2.0" ]
32
2020-07-04T18:38:15.000Z
2022-03-30T07:39:07.000Z
bedrock/bedrock-v291/src/main/java/com/nukkitx/protocol/bedrock/v291/serializer/MapInfoRequestSerializer_v291.java
lt-name/Protocol
587fe15c3da158729badb1a93819f0317860ffa5
[ "Apache-2.0" ]
49
2020-07-08T13:48:15.000Z
2022-02-22T10:47:33.000Z
39.48
102
0.81459
7,519
package com.nukkitx.protocol.bedrock.v291.serializer; import com.nukkitx.network.VarInts; import com.nukkitx.protocol.bedrock.BedrockPacketHelper; import com.nukkitx.protocol.bedrock.BedrockPacketSerializer; import com.nukkitx.protocol.bedrock.packet.MapInfoRequestPacket; import io.netty.buffer.ByteBuf; import lombok.AccessLevel; import lombok.NoArgsConstructor; @NoArgsConstructor(access = AccessLevel.PROTECTED) public class MapInfoRequestSerializer_v291 implements BedrockPacketSerializer<MapInfoRequestPacket> { public static final MapInfoRequestSerializer_v291 INSTANCE = new MapInfoRequestSerializer_v291(); @Override public void serialize(ByteBuf buffer, BedrockPacketHelper helper, MapInfoRequestPacket packet) { VarInts.writeLong(buffer, packet.getUniqueMapId()); } @Override public void deserialize(ByteBuf buffer, BedrockPacketHelper helper, MapInfoRequestPacket packet) { packet.setUniqueMapId(VarInts.readLong(buffer)); } }
3e11d698a32f138d98017314138f68d69b479b2d
978
java
Java
ecardflow/src/main/java/moe/codeest/ecardflow/mode/CrossMoveAnimMode.java
Biangkerok32/ECardFlow
b04568b9742de60ae1ddfc507c3995e168202716
[ "MIT" ]
52
2017-02-06T03:17:16.000Z
2021-09-13T08:12:47.000Z
ecardflow/src/main/java/moe/codeest/ecardflow/mode/CrossMoveAnimMode.java
Biangkerok32/ECardFlow
b04568b9742de60ae1ddfc507c3995e168202716
[ "MIT" ]
2
2018-02-19T11:29:24.000Z
2020-09-07T06:13:01.000Z
ecardflow/src/main/java/moe/codeest/ecardflow/mode/CrossMoveAnimMode.java
Biangkerok32/ECardFlow
b04568b9742de60ae1ddfc507c3995e168202716
[ "MIT" ]
15
2017-04-11T11:20:02.000Z
2019-10-09T00:36:42.000Z
26.432432
78
0.630879
7,520
package moe.codeest.ecardflow.mode; import android.widget.ImageView; /** * Created by codeest on 2017/2/25. */ public class CrossMoveAnimMode implements AnimMode { private static final float DEFAULT_MOVE_SCALE = 1.3f; private float mScale; public CrossMoveAnimMode() { this.mScale = DEFAULT_MOVE_SCALE; } public CrossMoveAnimMode(float mScale) { this.mScale = mScale; } @Override public void transformPage(ImageView ivBg, float position, int direction) { ivBg.setScaleX(mScale); ivBg.setScaleY(mScale); float totalMoveWidth = ivBg.getWidth() * ((mScale - 1) / 2); int lastPosition = Math.round(position); float mFraction; if (lastPosition % 2 == 0) { mFraction = -1 * (float) Math.sin(Math.PI * position); } else { mFraction = (float) Math.sin(Math.PI * position); } ivBg.setTranslationY(totalMoveWidth * mFraction); } }
3e11d738e6cf274d9a8ac0af3d993bb99e1bf1b7
4,208
java
Java
ebean-core/src/main/java/io/ebeaninternal/server/deploy/parse/AnnotationParser.java
tibco-jufernan/ebean-h2-fix
acfd882078ad154886a81c7baf193d88d58877f8
[ "Apache-2.0" ]
1,041
2016-08-03T12:27:03.000Z
2022-03-31T20:06:05.000Z
ebean-core/src/main/java/io/ebeaninternal/server/deploy/parse/AnnotationParser.java
tibco-jufernan/ebean-h2-fix
acfd882078ad154886a81c7baf193d88d58877f8
[ "Apache-2.0" ]
1,501
2016-08-03T11:37:04.000Z
2022-03-31T20:03:05.000Z
ebean-core/src/main/java/io/ebeaninternal/server/deploy/parse/AnnotationParser.java
tibco-jufernan/ebean-h2-fix
acfd882078ad154886a81c7baf193d88d58877f8
[ "Apache-2.0" ]
196
2016-08-09T03:26:24.000Z
2022-03-27T15:12:06.000Z
30.941176
95
0.703184
7,521
package io.ebeaninternal.server.deploy.parse;

import io.ebeaninternal.server.deploy.BeanCascadeInfo;
import io.ebeaninternal.server.deploy.meta.DeployBeanDescriptor;
import io.ebeaninternal.server.deploy.meta.DeployBeanProperty;
import io.ebeaninternal.server.deploy.meta.DeployBeanPropertyAssoc;
import io.ebeaninternal.server.deploy.meta.DeployBeanPropertyAssocOne;

import javax.persistence.AttributeOverride;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

/**
 * Base class for reading deployment annotations.
 */
public abstract class AnnotationParser extends AnnotationBase {

  final DeployBeanInfo<?> info;              // deployment info of the bean being parsed
  final DeployBeanDescriptor<?> descriptor;  // descriptor taken from info, cached for convenience
  final Class<?> beanType;                   // the entity bean class
  final ReadAnnotationConfig readConfig;     // global annotation-reading configuration

  AnnotationParser(DeployBeanInfo<?> info, ReadAnnotationConfig readConfig) {
    super(info.getUtil());
    this.readConfig = readConfig;
    this.info = info;
    this.beanType = info.getDescriptor().getBeanType();
    this.descriptor = info.getDescriptor();
  }

  /**
   * read the deployment annotations.
   */
  @Override
  public abstract void parse();

  /**
   * Read the Id annotation on an embeddedId.
   */
  void readIdAssocOne(DeployBeanPropertyAssoc<?> prop) {
    prop.setNullable(false);
    if (prop.isIdClass()) {
      prop.setImportedPrimaryKey();
    } else {
      prop.setId();
      prop.setEmbedded();
      info.setEmbeddedId(prop);
    }
  }

  /**
   * Read the Id annotation on scalar property.
   * UUID ids get a UUID generator when automatic id generation is enabled.
   */
  void readIdScalar(DeployBeanProperty prop) {
    prop.setNullable(false);
    if (prop.isIdClass()) {
      prop.setImportedPrimaryKey();
    } else {
      prop.setId();
      if (prop.getPropertyType().equals(UUID.class) && readConfig.isIdGeneratorAutomatic()) {
        descriptor.setUuidGenerator();
      }
    }
  }

  /**
   * Helper method to set cascade types to the CascadeInfo on BeanProperty.
   * No-op when the array is null or empty.
   */
  void setCascadeTypes(CascadeType[] cascadeTypes, BeanCascadeInfo cascadeInfo) {
    if (cascadeTypes != null && cascadeTypes.length > 0) {
      cascadeInfo.setTypes(cascadeTypes);
    }
  }

  /**
   * Read an AttributeOverrides if they exist for this embedded bean.
   * Maps each override's attribute name to its Column definition.
   */
  void readEmbeddedAttributeOverrides(DeployBeanPropertyAssocOne<?> prop) {
    Set<AttributeOverride> attrOverrides = annotationAttributeOverrides(prop);
    if (!attrOverrides.isEmpty()) {
      Map<String, Column> propMap = new HashMap<>(attrOverrides.size());
      for (AttributeOverride attrOverride : attrOverrides) {
        propMap.put(attrOverride.name(), attrOverride.column());
      }
      prop.getDeployEmbedded().putAll(propMap);
    }
  }

  /**
   * Apply a JPA Column annotation to the deploy property: name, mutability,
   * nullability, uniqueness, length/precision/scale, column definition and
   * (when the table differs from the base table) the secondary table.
   */
  void readColumn(Column columnAnn, DeployBeanProperty prop) {
    setColumnName(prop, columnAnn.name());
    prop.setDbInsertable(columnAnn.insertable());
    prop.setDbUpdateable(columnAnn.updatable());
    prop.setNullable(columnAnn.nullable());
    prop.setUnique(columnAnn.unique());
    if (columnAnn.precision() > 0) {
      // precision (numeric types) takes priority over length
      prop.setDbLength(columnAnn.precision());
    } else if (columnAnn.length() != 255) {
      // set default 255 on DbTypeMap
      prop.setDbLength(columnAnn.length());
    }
    prop.setDbScale(columnAnn.scale());
    prop.setDbColumnDefn(columnAnn.columnDefinition());

    String baseTable = descriptor.getBaseTable();
    String tableName = columnAnn.table();
    if (!"".equals(tableName) && !tableName.equalsIgnoreCase(baseTable)) {
      // its on a secondary table...
      prop.setSecondaryTable(tableName);
    }
  }

  /**
   * Set the db column name (quoted-identifier converted), ignoring empty names.
   */
  protected void setColumnName(DeployBeanProperty prop, String name) {
    if (!isEmpty(name)) {
      prop.setDbColumn(databasePlatform.convertQuotedIdentifiers(name));
    }
  }

  /**
   * Convert quoted identifiers for each column name.
   * Note: mutates the given array in place and returns it.
   */
  String[] convertColumnNames(String[] columnNames) {
    for (int i = 0; i < columnNames.length; i++) {
      columnNames[i] = databasePlatform.convertQuotedIdentifiers(columnNames[i]);
    }
    return columnNames;
  }

  /**
   * Process any formula from &#64;Formula or &#64;Where.
   * Replaces the ${dbTableName} placeholder with the bean's base table.
   */
  protected String processFormula(String source) {
    return source == null ? null : source.replace("${dbTableName}", descriptor.getBaseTable());
  }
}
3e11d7e9988386427e31c8e931b215237f099601
7,866
java
Java
backend/src/test/java/org/seng302/datagenerator/LocationGeneratorTest.java
shengyKai/Wasteless-SENG302-Group-Project
f31a4ccac75fdec1bd411ca8e9c10ccebbaa1ce7
[ "Unlicense" ]
null
null
null
backend/src/test/java/org/seng302/datagenerator/LocationGeneratorTest.java
shengyKai/Wasteless-SENG302-Group-Project
f31a4ccac75fdec1bd411ca8e9c10ccebbaa1ce7
[ "Unlicense" ]
null
null
null
backend/src/test/java/org/seng302/datagenerator/LocationGeneratorTest.java
shengyKai/Wasteless-SENG302-Group-Project
f31a4ccac75fdec1bd411ca8e9c10ccebbaa1ce7
[ "Unlicense" ]
null
null
null
40.132653
179
0.734045
7,522
package org.seng302.datagenerator;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.runner.RunWith;
import org.seng302.leftovers.Main;
import org.seng302.leftovers.persistence.UserRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.sql.*;
import java.util.List;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Integration tests for the locations created as a side effect of generating users.
 * LocationGenerator is only invoked through UserGenerator/BusinessGenerator, so these
 * tests drive it via {@link UserGenerator} and then inspect the database directly.
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes={Main.class})
class LocationGeneratorTest {
    private static final String STREET_NAMES_FILE = "street-names.txt";
    private static final String CITIES_FILE = "cities.txt";
    private static final String REGIONS_FILE = "regions.txt";
    private static final String COUNTRIES_FILE = "countries.txt";
    private static final String DISTRICTS_FILE = "districts.txt";

    private Connection conn;
    // LocationGenerator is called by UserGenerator or BusinessGenerator, so it has to be called with either of them
    private UserGenerator userGenerator;

    @Autowired
    private UserRepository userRepository;

    // Example data files shared with the generators; generated values must come from these lists.
    private List<String> streetNames = ExampleDataFileReader.readExampleDataFile(STREET_NAMES_FILE);
    private List<String> cities = ExampleDataFileReader.readExampleDataFile(CITIES_FILE);
    private List<String> regions = ExampleDataFileReader.readExampleDataFile(REGIONS_FILE);
    private List<String> countries = ExampleDataFileReader.readExampleDataFile(COUNTRIES_FILE);
    private List<String> districts = ExampleDataFileReader.readExampleDataFile(DISTRICTS_FILE);

    /**
     * Opens a JDBC connection using the credentials from application.properties and
     * constructs the user generator under test. Fails fast if any credential is missing.
     */
    @BeforeEach
    public void setup() throws SQLException {
        Map<String, String> properties = ExampleDataFileReader.readPropertiesFile("/application.properties");
        if (properties.get("spring.datasource.url") == null || properties.get("spring.datasource.username") == null ||
                properties.get("spring.datasource.password") == null) {
            fail("The url/username/password is not found");
        }
        this.conn = DriverManager.getConnection(properties.get("spring.datasource.url"),
                properties.get("spring.datasource.username"), properties.get("spring.datasource.password"));

        // Creates generators
        this.userGenerator = new UserGenerator(conn);
    }

    @AfterEach
    public void teardown() throws SQLException {
        userRepository.deleteAll();
        conn.close();
    }

    /**
     * Checks that the required fields within the location table are not null using an SQL query.
     *
     * @param userId the id of the generated user
     */
    public void checkRequiredFieldsNotNull(long userId) throws SQLException {
        // try-with-resources so the statement and result set are always closed (fix: they leaked before)
        try (PreparedStatement stmt = conn.prepareStatement(
                "SELECT COUNT(*) FROM user JOIN location ON user.address_id = location.id WHERE userid = ? AND " +
                        "city IS NOT NULL AND country IS NOT NULL AND post_code IS NOT NULL AND " +
                        "region IS NOT NULL AND street_name IS NOT NULL AND street_number IS NOT NULL"
        )) {
            stmt.setObject(1, userId);
            try (ResultSet results = stmt.executeQuery()) {
                results.next();
                assertEquals(1, results.getLong(1));
            }
        }
    }

    /**
     * Queries the database to find out how many location entries are in the database.
     *
     * @return the number of location entries in the database
     */
    public long getNumLocationsInDB() throws SQLException {
        try (PreparedStatement stmt = conn.prepareStatement("SELECT COUNT(*) FROM location");
             ResultSet results = stmt.executeQuery()) {
            results.next();
            return results.getLong(1);
        }
    }

    /**
     * Gets the location id from the generated user.
     *
     * @param userId id of the inquired user from the table
     * @return id of the location from the table
     */
    public Long getLocationIdFromDB(Long userId) throws SQLException {
        try (PreparedStatement stmt = conn.prepareStatement("SELECT address_id FROM user WHERE userid = ?")) {
            stmt.setObject(1, userId);
            try (ResultSet results = stmt.executeQuery()) {
                results.next();
                return results.getLong(1);
            }
        }
    }

    /**
     * Gets the specified field value of the location from the DB to check the field validity.
     * NOTE(review): fieldName is concatenated into the SQL; only call this with trusted,
     * hard-coded column names (as the tests below do) — never with external input.
     *
     * @param fieldName  Specified location field that is required
     * @param locationId Location Id of the generated location
     * @return a String containing the field value of the location from the DB
     */
    public String getLocationFieldFromDB(String fieldName, Long locationId) throws SQLException {
        try (PreparedStatement stmt = conn.prepareStatement("SELECT " + fieldName + " FROM location WHERE id = ?")) {
            stmt.setObject(1, locationId);
            try (ResultSet results = stmt.executeQuery()) {
                results.next();
                return results.getString(1);
            }
        }
    }

    @Test
    void generateOneLocation_generateOneUser_oneLocationEntryGenerated() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        assertEquals(1, userIds.size());
        long userId = userIds.get(0);
        checkRequiredFieldsNotNull(userId);
    }

    @Test
    void generateMultipleLocations_generateMultipleUsers_multipleLocationEntriesGenerated() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(10);
        assertEquals(10, userIds.size());
        for (long userId : userIds) {
            checkRequiredFieldsNotNull(userId);
        }
    }

    @Test
    void generateZeroLocations_generateZeroUsers_zeroLocationEntriesGenerated() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(0);
        assertEquals(0, userIds.size());
    }

    @Test
    void generateNegativeLocations_generateNegativeUsers_noLocationEntriesGenerated() throws SQLException {
        // A negative count is treated as "generate nothing" rather than an error.
        List<Long> userIds = userGenerator.generateUsers(-1);
        assertEquals(0, userIds.size());
    }

    @Test
    void checkCityValidity_checkedAgainstExampleDataFile_cityIsValid() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        Long locationId = getLocationIdFromDB(userIds.get(0));
        String cityResult = getLocationFieldFromDB("city", locationId);
        assertTrue(cities.contains(cityResult));
    }

    @Test
    void checkStreetNameValidity_checkedAgainstExampleDataFile_streetNameIsValid() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        Long locationId = getLocationIdFromDB(userIds.get(0));
        String streetNameResult = getLocationFieldFromDB("street_name", locationId);
        assertTrue(streetNames.contains(streetNameResult));
    }

    @Test
    void checkRegionValidity_checkedAgainstExampleDataFile_regionIsValid() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        Long locationId = getLocationIdFromDB(userIds.get(0));
        String regionResult = getLocationFieldFromDB("region", locationId);
        assertTrue(regions.contains(regionResult));
    }

    @Test
    void checkCountryValidity_checkedAgainstExampleDataFile_countryIsValid() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        Long locationId = getLocationIdFromDB(userIds.get(0));
        String countryResult = getLocationFieldFromDB("country", locationId);
        assertTrue(countries.contains(countryResult));
    }

    @Test
    void checkDistrictValidity_checkedAgainstExampleDataFile_districtIsValid() throws SQLException {
        List<Long> userIds = userGenerator.generateUsers(1);
        Long locationId = getLocationIdFromDB(userIds.get(0));
        String districtResult = getLocationFieldFromDB("district", locationId);
        assertTrue(districts.contains(districtResult));
    }
}
3e11d80d400f8d03df92eb0bc457b1f2f2287072
2,900
java
Java
app/src/main/java/com/lucasvinicius/userslisting/view/AdministrationFragment.java
Lucas-Vinicius27/UsersListing
5850afa0f844e7104bb0e61fbffe8714cf98ae40
[ "MIT" ]
null
null
null
app/src/main/java/com/lucasvinicius/userslisting/view/AdministrationFragment.java
Lucas-Vinicius27/UsersListing
5850afa0f844e7104bb0e61fbffe8714cf98ae40
[ "MIT" ]
null
null
null
app/src/main/java/com/lucasvinicius/userslisting/view/AdministrationFragment.java
Lucas-Vinicius27/UsersListing
5850afa0f844e7104bb0e61fbffe8714cf98ae40
[ "MIT" ]
null
null
null
37.662338
123
0.744483
7,523
package com.lucasvinicius.userslisting.view;

import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;

import androidx.annotation.NonNull;
import androidx.fragment.app.Fragment;
import androidx.lifecycle.Observer;
import androidx.lifecycle.ViewModelProvider;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;

import com.lucasvinicius.userslisting.R;
import com.lucasvinicius.userslisting.constants.LevelConstants;
import com.lucasvinicius.userslisting.model.UsersModel;
import com.lucasvinicius.userslisting.view.adapter.AdministrationAdapter;
import com.lucasvinicius.userslisting.viewmodel.AdministrationViewModel;

import java.util.List;

/**
 * Fragment that lists users filtered by access level. The level filter is read
 * from the fragment arguments under {@code LevelConstants.filter} and also
 * selects the header label shown above the list.
 */
public class AdministrationFragment extends Fragment {

    // fix: never-reassigned fields made final
    private final ViewHolder mViewHolder = new ViewHolder();
    private AdministrationViewModel mAdministrationViewModel;
    private final AdministrationAdapter mAdministrationAdapter = new AdministrationAdapter();
    private Integer mFilter = 0;

    @Override // fix: annotation was missing on this lifecycle override
    public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        this.mAdministrationViewModel = new ViewModelProvider(this).get(AdministrationViewModel.class);
        View root = inflater.inflate(R.layout.fragment_administration, container, false);

        this.mViewHolder.textView = root.findViewById(R.id.text_view_label);
        this.mViewHolder.recyclerView = root.findViewById(R.id.recycler_view_users);
        this.mViewHolder.recyclerView.setLayoutManager(new LinearLayoutManager(getContext()));
        this.mViewHolder.recyclerView.setAdapter(this.mAdministrationAdapter);

        this.observers();

        if (getArguments() != null) {
            this.mFilter = getArguments().getInt(LevelConstants.filter);
        }

        // Pick the header text matching the requested access level.
        if (this.mFilter.equals(LevelConstants.guest)) {
            this.mViewHolder.textView.setText(R.string.text_view_guest_area);
        } else if (this.mFilter.equals(LevelConstants.director)) {
            this.mViewHolder.textView.setText(R.string.text_view_board_area);
        } else {
            this.mViewHolder.textView.setText(R.string.text_view_administrative_area);
        }

        return root;
    }

    @Override
    public void onResume() {
        super.onResume();
        // Refresh the list every time the fragment becomes visible again.
        this.mAdministrationViewModel.getList(this.mFilter);
    }

    /** Subscribes the adapter to the ViewModel's user list. */
    private void observers() {
        // fix: lambda replaces the anonymous Observer implementation
        this.mAdministrationViewModel.listLiveDataUsers.observe(getViewLifecycleOwner(),
                listUsersModels -> mAdministrationAdapter.attachList(listUsersModels));
    }

    /** Simple view cache for the fragment's widgets. */
    private static class ViewHolder {
        RecyclerView recyclerView;
        TextView textView;
    }
}
3e11d8a2815b74f5379ae0d74321fd5f3200c6c7
1,586
java
Java
commdomain/src/main/java/com/wenc/commdomain/vo/stb/EStoreTreeVO.java
ant2008/wis-aftersale
d305802ab4e282b81e7763cd1b47e689c0f8e5fd
[ "MIT" ]
null
null
null
commdomain/src/main/java/com/wenc/commdomain/vo/stb/EStoreTreeVO.java
ant2008/wis-aftersale
d305802ab4e282b81e7763cd1b47e689c0f8e5fd
[ "MIT" ]
null
null
null
commdomain/src/main/java/com/wenc/commdomain/vo/stb/EStoreTreeVO.java
ant2008/wis-aftersale
d305802ab4e282b81e7763cd1b47e689c0f8e5fd
[ "MIT" ]
null
null
null
19.825
61
0.629256
7,524
package com.wenc.commdomain.vo.stb; import java.util.List; public class EStoreTreeVO { private String title; private String code; private Boolean expand; private Boolean disabled; private Boolean disableCheckbox; private Boolean selected; private Boolean checked; private List<EStoreTreeVO> children; public String getTitle() { return title; } public void setTitle(String title) { this.title = title; } public String getCode() { return code; } public void setCode(String code) { this.code = code; } public Boolean getExpand() { return expand; } public void setExpand(Boolean expand) { this.expand = expand; } public Boolean getDisabled() { return disabled; } public void setDisabled(Boolean disabled) { this.disabled = disabled; } public Boolean getDisableCheckbox() { return disableCheckbox; } public void setDisableCheckbox(Boolean disableCheckbox) { this.disableCheckbox = disableCheckbox; } public Boolean getSelected() { return selected; } public void setSelected(Boolean selected) { this.selected = selected; } public Boolean getChecked() { return checked; } public void setChecked(Boolean checked) { this.checked = checked; } public List<EStoreTreeVO> getChildren() { return children; } public void setChildren(List<EStoreTreeVO> children) { this.children = children; } }
3e11d92bd81cf6b7a7fd6e1686c0008b6aa639e3
3,356
java
Java
src/main/java/com/microsoft/graph/requests/extensions/AdministrativeUnitReferenceRequestBuilder.java
isabella232/msgraph-beta-sdk-java
7d2b929d5c99c01ec1af1a251f4bf5876ca95ed8
[ "MIT" ]
null
null
null
src/main/java/com/microsoft/graph/requests/extensions/AdministrativeUnitReferenceRequestBuilder.java
isabella232/msgraph-beta-sdk-java
7d2b929d5c99c01ec1af1a251f4bf5876ca95ed8
[ "MIT" ]
1
2021-02-23T20:48:12.000Z
2021-02-23T20:48:12.000Z
src/main/java/com/microsoft/graph/requests/extensions/AdministrativeUnitReferenceRequestBuilder.java
isabella232/msgraph-beta-sdk-java
7d2b929d5c99c01ec1af1a251f4bf5876ca95ed8
[ "MIT" ]
null
null
null
49.352941
188
0.767878
7,525
// ------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation.  All Rights Reserved.  Licensed under the MIT License.  See License in the project root for license information.
// ------------------------------------------------------------------------------

package com.microsoft.graph.requests.extensions;

import com.microsoft.graph.http.IRequestBuilder;
import com.microsoft.graph.core.ClientException;
import com.microsoft.graph.concurrency.ICallback;
import com.microsoft.graph.models.extensions.AdministrativeUnit;
import com.microsoft.graph.requests.extensions.IDirectoryObjectCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.IDirectoryObjectRequestBuilder;
import com.microsoft.graph.requests.extensions.DirectoryObjectCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.DirectoryObjectRequestBuilder;
import com.microsoft.graph.requests.extensions.IScopedRoleMembershipCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.IScopedRoleMembershipRequestBuilder;
import com.microsoft.graph.requests.extensions.ScopedRoleMembershipCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.ScopedRoleMembershipRequestBuilder;
import com.microsoft.graph.requests.extensions.IExtensionCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.IExtensionRequestBuilder;
import com.microsoft.graph.requests.extensions.ExtensionCollectionRequestBuilder;
import com.microsoft.graph.requests.extensions.ExtensionRequestBuilder;
import java.util.Arrays;
import java.util.EnumSet;
import com.microsoft.graph.http.BaseRequestBuilder;
import com.microsoft.graph.core.IBaseClient;

// **NOTE** This file was generated by a tool and any changes will be overwritten.
// Do not hand-edit; regenerate instead.

/**
 * The class for the Administrative Unit Reference Request Builder.
 */
public class AdministrativeUnitReferenceRequestBuilder extends BaseRequestBuilder implements IAdministrativeUnitReferenceRequestBuilder {

    /**
     * The request builder for the AdministrativeUnit
     *
     * @param requestUrl     the request URL
     * @param client         the service client
     * @param requestOptions the options for this request
     */
    public AdministrativeUnitReferenceRequestBuilder(final String requestUrl, final IBaseClient client, final java.util.List<? extends com.microsoft.graph.options.Option> requestOptions) {
        super(requestUrl, client, requestOptions);
    }

    /**
     * Creates the request, merging the given options with any already held by this builder.
     *
     * @param requestOptions the options for this request
     * @return The IAdministrativeUnitReferenceRequest instance
     */
    public IAdministrativeUnitReferenceRequest buildRequest(final com.microsoft.graph.options.Option... requestOptions) {
        return buildRequest(getOptions(requestOptions));
    }

    /**
     * Creates the request with specific requestOptions instead of the existing requestOptions
     *
     * @param requestOptions the options for this request
     * @return the IAdministrativeUnitReferenceRequest instance
     */
    public IAdministrativeUnitReferenceRequest buildRequest(final java.util.List<? extends com.microsoft.graph.options.Option> requestOptions) {
        return new AdministrativeUnitReferenceRequest(getRequestUrl(), getClient(), requestOptions);
    }
}
3e11d9fd2c5fc1982587d4e0204c915f066b9c3d
101
java
Java
spring-boot-14-dubbo/consumer_users/src/main/java/com/xubh/ticket/service/TicketService.java
786991884/spring-boot-study
b74ac43e26d4811f1f4d9f7dbade672c9d40ba87
[ "Apache-2.0" ]
1
2020-10-10T07:31:20.000Z
2020-10-10T07:31:20.000Z
spring-boot-14-dubbo/provider_ticket/src/main/java/com/xubh/ticket/service/TicketService.java
786991884/spring-boot-study
b74ac43e26d4811f1f4d9f7dbade672c9d40ba87
[ "Apache-2.0" ]
null
null
null
spring-boot-14-dubbo/provider_ticket/src/main/java/com/xubh/ticket/service/TicketService.java
786991884/spring-boot-study
b74ac43e26d4811f1f4d9f7dbade672c9d40ba87
[ "Apache-2.0" ]
null
null
null
14.428571
32
0.752475
7,526
package com.xubh.ticket.service; public interface TicketService { public String getTicket(); }
3e11dbe54f12d7c462e43c3db4f23e58ca0329c2
1,920
java
Java
_clones/data-structures-and-algorithms-in-java-6th-edition-goodrich-tamassia/src/main/java/dsaj/primer/InputExample.java
hsmak/data-structures-and-algorithms
2b302c5835b61881b9fd1d923e55e62d38dd6014
[ "MIT" ]
null
null
null
_clones/data-structures-and-algorithms-in-java-6th-edition-goodrich-tamassia/src/main/java/dsaj/primer/InputExample.java
hsmak/data-structures-and-algorithms
2b302c5835b61881b9fd1d923e55e62d38dd6014
[ "MIT" ]
null
null
null
_clones/data-structures-and-algorithms-in-java-6th-edition-goodrich-tamassia/src/main/java/dsaj/primer/InputExample.java
hsmak/data-structures-and-algorithms
2b302c5835b61881b9fd1d923e55e62d38dd6014
[ "MIT" ]
null
null
null
33.103448
82
0.686458
7,527
/* * Copyright 2014, Michael T. Goodrich, Roberto Tamassia, Michael H. Goldwasser * * Developed for use with the book: * * Data Structures and Algorithms in Java, Sixth Edition * Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser * John Wiley & Sons, 2014 * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package dsaj.primer; import java.util.Scanner; // loads Scanner definition for our use public class InputExample { public static void main(String[] args) { Scanner input = new Scanner(System.in); System.out.print("Enter your age in years: "); double age = input.nextDouble(); System.out.print("Enter your maximum heart rate: "); double rate = input.nextDouble(); double fb = (rate - age) * 0.65; System.out.println("Your ideal fat-burning heart rate is " + fb); } public static int getInt() { Scanner input = new Scanner(System.in); System.out.print("Please enter an integer: "); while (!input.hasNextInt()) { input.nextLine(); System.out.print("Invalid integer; please enter an integer: "); } int i = input.nextInt(); return i; } } /* Sample user session ------------------- Enter your age in years: 21 Enter your maximum heart rate: 220 Your target fat-burning heart rate is 129.35 */
3e11dd52beacaad154df642cdcb22c58efec0dd0
513
java
Java
Atlas/core/src/main/java/net/avicus/atlas/module/stats/action/lifetime/type/CompetitorLifetime.java
AtditC/AvicusNetwork
bae3acf9eccad54179dfefeb7170701ba25a287d
[ "MIT" ]
24
2018-01-30T19:17:13.000Z
2021-12-23T01:32:52.000Z
Atlas/core/src/main/java/net/avicus/atlas/module/stats/action/lifetime/type/CompetitorLifetime.java
AtditC/AvicusNetwork
bae3acf9eccad54179dfefeb7170701ba25a287d
[ "MIT" ]
7
2018-01-27T04:15:00.000Z
2019-01-28T20:55:33.000Z
Atlas/core/src/main/java/net/avicus/atlas/module/stats/action/lifetime/type/CompetitorLifetime.java
AtditC/AvicusNetwork
bae3acf9eccad54179dfefeb7170701ba25a287d
[ "MIT" ]
20
2018-01-27T01:17:38.000Z
2020-10-23T04:29:59.000Z
25.65
74
0.79922
7,528
package net.avicus.atlas.module.stats.action.lifetime.type; import java.time.Instant; import lombok.Getter; import lombok.ToString; import net.avicus.atlas.module.groups.Competitor; import net.avicus.atlas.module.stats.action.base.CompetitorAction; @ToString public class CompetitorLifetime extends ActionLifetime<CompetitorAction> { @Getter private final Competitor competitor; public CompetitorLifetime(Instant start, Competitor competitor) { super(start); this.competitor = competitor; } }
3e11ddbaefcbc92fae4a87b1ab0f1a40f19a9035
2,925
java
Java
src/test/java/org/apache/sling/distribution/journal/it/tests/StagedDistributionFailureTest.java
tmaret/org.apache.sling.distribution.journal.it
458f236dd66c05e416163c3532673e98d17fedee
[ "Apache-2.0" ]
null
null
null
src/test/java/org/apache/sling/distribution/journal/it/tests/StagedDistributionFailureTest.java
tmaret/org.apache.sling.distribution.journal.it
458f236dd66c05e416163c3532673e98d17fedee
[ "Apache-2.0" ]
null
null
null
src/test/java/org/apache/sling/distribution/journal/it/tests/StagedDistributionFailureTest.java
tmaret/org.apache.sling.distribution.journal.it
458f236dd66c05e416163c3532673e98d17fedee
[ "Apache-2.0" ]
null
null
null
30.789474
90
0.707692
7,529
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.sling.distribution.journal.it.tests;

import org.apache.sling.distribution.journal.it.DistributionTestBase;
import org.apache.sling.distribution.journal.it.ext.AfterOsgi;
import org.apache.sling.distribution.journal.it.ext.BeforeOsgi;
import org.apache.sling.distribution.journal.it.ext.ExtPaxExam;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.TestContainer;
import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
import org.ops4j.pax.exam.spi.reactors.PerClass;

import java.io.IOException;

/**
 * Pax-Exam integration test: a regular publish instance is started first and the
 * "golden" publish instance is only started once the regular subscriber queue has
 * at least one item, then the test verifies the content reaches both instances.
 */
@RunWith(ExtPaxExam.class)
@ExamReactorStrategy(PerClass.class)
public class StagedDistributionFailureTest extends DistributionTestBase {

    private static final String SUB1_AGENT = "subscriber-regular";
    private static final String SUB2_AGENT = "subscriber-golden";

    private static TestContainer publish;
    private static TestContainer golden_publish;

    private static final String TEST_PATH = "/content/mytest";

    /**
     * Boots the regular publish instance, then starts the golden publish instance
     * from a background thread once the regular queue is non-empty (this staggered
     * start is the scenario under test).
     */
    @BeforeOsgi
    public static void beforeOsgi() throws Exception {
        beforeOsgiBase();
        publish = startPublishInstance(8182, SUB1_AGENT, false, SUB2_AGENT);
        new Thread(() -> {
            // Wait for at least one item in publish queue before starting golden publish
            waitQueueItems(8182, SUB1_AGENT, 1);
            LOG.info("Starting golden publish");
            golden_publish = startPublishInstance(8183, SUB2_AGENT, true, null);
        }).start();
    }

    /** Stops whichever containers were started; null-safe because the golden start is asynchronous. */
    @AfterOsgi
    public static void afterOsgi() throws IOException {
        if (publish != null) {
            publish.stop();
        }
        if (golden_publish != null) {
            golden_publish.stop();
        }
        afterOsgiBase();
    }

    @Before
    public void before() {
        createPath(TEST_PATH);
        waitSubQueues(SUB1_AGENT);
    }

    /** Distributes the test path and expects it to arrive on both publish instances. */
    @Test
    public void testDistribute() {
        distribute(TEST_PATH);
        waitSubQueues(SUB1_AGENT, SUB2_AGENT);
        waitEmptySubQueues();
        waitPath(8182, TEST_PATH);
        waitPath(8183, TEST_PATH);
    }
}
3e11df47037c994964e8d52c647c20a9367a41f0
2,954
java
Java
nfctools-ndef/src/main/java/org/nfctools/ndef/NdefRecordDecoder.java
cleberdealmeida/uniao
2bec9f1ff5f5114d9f43370651d60111b360f26f
[ "Apache-2.0" ]
183
2015-01-02T02:12:57.000Z
2021-11-07T20:23:18.000Z
nfctools-ndef/src/main/java/org/nfctools/ndef/NdefRecordDecoder.java
cleberdealmeida/uniao
2bec9f1ff5f5114d9f43370651d60111b360f26f
[ "Apache-2.0" ]
12
2015-07-15T13:55:54.000Z
2021-03-10T08:41:37.000Z
nfctools-ndef/src/main/java/org/nfctools/ndef/NdefRecordDecoder.java
cleberdealmeida/uniao
2bec9f1ff5f5114d9f43370651d60111b360f26f
[ "Apache-2.0" ]
66
2015-01-22T15:10:48.000Z
2021-11-19T08:54:11.000Z
37.846154
130
0.792005
7,530
/**
 * Copyright 2011-2012 Adrian Stabiszewski, dycjh@example.com
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.nfctools.ndef;

import java.util.ArrayList;
import java.util.List;

import org.nfctools.ndef.auri.AbsoluteUriRecordDecoder;
import org.nfctools.ndef.empty.EmptyRecordDecoder;
import org.nfctools.ndef.ext.ExternalTypeDecoder;
import org.nfctools.ndef.ext.ExternalTypeRecordConfig;
import org.nfctools.ndef.mime.MimeRecordDecoder;
import org.nfctools.ndef.unknown.UnknownRecordDecoder;
import org.nfctools.ndef.unknown.unsupported.UnsupportedRecord;
import org.nfctools.ndef.wkt.WellKnownRecordConfig;
import org.nfctools.ndef.wkt.WellKnownRecordDecoder;
import org.nfctools.ndef.wkt.decoder.RecordDecoder;

/**
 * Dispatches a raw {@link NdefRecord} to the first registered decoder able to
 * handle it; records no decoder accepts are wrapped as {@link UnsupportedRecord}.
 */
public class NdefRecordDecoder {

	// fix: the decoder registry fields are never reassigned, so they are declared final
	private final WellKnownRecordDecoder wellKnownRecordDecoder = new WellKnownRecordDecoder();
	private final ExternalTypeDecoder externalTypeDecoder = new ExternalTypeDecoder();
	private final List<RecordDecoder<? extends Record>> recordDecoders = new ArrayList<RecordDecoder<? extends Record>>();

	public NdefRecordDecoder() {
		// Registration order matters: decode() returns the FIRST decoder that matches.
		recordDecoders.add(wellKnownRecordDecoder);
		recordDecoders.add(new AbsoluteUriRecordDecoder());
		recordDecoders.add(new MimeRecordDecoder());
		recordDecoders.add(externalTypeDecoder);
		recordDecoders.add(new EmptyRecordDecoder());
		recordDecoders.add(new UnknownRecordDecoder());
	}

	/**
	 * Decodes a single (non-chunked) NDEF record.
	 *
	 * @param ndefRecord     the raw record to decode
	 * @param messageDecoder decoder used for any nested payloads
	 * @return the decoded record, or an {@link UnsupportedRecord} if no decoder matches
	 * @throws IllegalArgumentException if the record is chunked
	 */
	public Record decode(NdefRecord ndefRecord, NdefMessageDecoder messageDecoder) {
		if (ndefRecord.isChunked()) {
			throw new IllegalArgumentException("Cannot decode chunked record");
		}

		for (RecordDecoder<? extends Record> decoder : recordDecoders) {
			if (decoder.canDecode(ndefRecord))
				return decoder.decodeRecord(ndefRecord, messageDecoder);
		}

		// NFC Data Exchange Format (NDEF) 1.0:
		// An NDEF parser that receives an NDEF record with an unknown or unsupported TNF field value SHOULD treat it as 0x05 (Unknown).
		// It is RECOMMENDED that an NDEF parser receiving an NDEF record of this type,
		// without further context to its use, provides a mechanism for storing but not processing the payload.
		return new UnsupportedRecord(ndefRecord);
	}

	/** Registers an additional well-known-type record configuration. */
	public void registerWellKnownRecordConfig(WellKnownRecordConfig recordconfig) {
		wellKnownRecordDecoder.addRecordConfig(recordconfig);
	}

	/** Registers an additional external-type record configuration. */
	public void registerExternalTypeRecordConfig(ExternalTypeRecordConfig recordconfig) {
		externalTypeDecoder.addRecordConfig(recordconfig);
	}
}
3e11dfe65a4052b2e051072be9e4f49e1a0b07f6
526
java
Java
eapli.base (20210331)/eapli.base/base.core/src/main/java/eapli/base/servicomanagement/dto/ServicoDTO.java
fabioalvesilva/help-desk-app
334f5fc02e10b6345a36e47bc4dfd10aa7098b0d
[ "MIT", "Unlicense" ]
null
null
null
eapli.base (20210331)/eapli.base/base.core/src/main/java/eapli/base/servicomanagement/dto/ServicoDTO.java
fabioalvesilva/help-desk-app
334f5fc02e10b6345a36e47bc4dfd10aa7098b0d
[ "MIT", "Unlicense" ]
null
null
null
eapli.base (20210331)/eapli.base/base.core/src/main/java/eapli/base/servicomanagement/dto/ServicoDTO.java
fabioalvesilva/help-desk-app
334f5fc02e10b6345a36e47bc4dfd10aa7098b0d
[ "MIT", "Unlicense" ]
null
null
null
25.047619
136
0.692015
7,531
package eapli.base.servicomanagement.dto; public class ServicoDTO { public String codUnico; public String titulo; public String descBreve; public String descCompleta; public String icone; public ServicoDTO(final String codigo, final String titulo, final String descBreve, final String descCompleta, final String icone) { this.codUnico = codigo; this.titulo = titulo; this.descBreve = descBreve; this.descCompleta = descCompleta; this.icone = icone; } }
3e11e047ffd1d9e29d618e37583313e115c0e605
1,291
java
Java
src/main/java/com/nativapps/arpia/database/entity/Month.java
ryctabo/java-jersey-jaxrs
deae0cd11087a6e4518f08aa32fe5e3ffd293f67
[ "MIT" ]
null
null
null
src/main/java/com/nativapps/arpia/database/entity/Month.java
ryctabo/java-jersey-jaxrs
deae0cd11087a6e4518f08aa32fe5e3ffd293f67
[ "MIT" ]
null
null
null
src/main/java/com/nativapps/arpia/database/entity/Month.java
ryctabo/java-jersey-jaxrs
deae0cd11087a6e4518f08aa32fe5e3ffd293f67
[ "MIT" ]
null
null
null
15.369048
47
0.407436
7,532
package com.nativapps.arpia.database.entity; /** * * @author Gustavo Pacheco <ychag@example.com> * @version 1.0 */ public enum Month { /** * Month of January. * <p> * Translate: Enero */ JANUARY, /** * Month of February. * <p> * Translate: Febrero */ FEBRUARY, /** * Month of March. * <p> * Translate: Marzo */ MARCH, /** * Month of April. * <p> * Translate: Abril */ APRIL, /** * Month of May. * <p> * Translate: Mayo */ MAY, /** * Month of June. * <p> * Translate: Junio */ JUNE, /** * Month of July. * <p> * Translate: Julio */ JULY, /** * Month of August. * <p> * Translate: Agosto */ AUGUST, /** * Month of September. * <p> * Translate: Septiembre */ SEPTEMBER, /** * Month of October. * <p> * Translate: Octubre */ OCTOBER, /** * Month of November. * <p> * Translate: Noviembre */ NOVEMBER, /** * Month of December. * <p> * Translate: Diciembre */ DECEMBER }
3e11e06d55fe515c36b500015e02bf3b85568cf1
149
java
Java
net/minecraft/world/chunk/storage/RegionFile$ChunkBuffer.java
OdshDays/corrosion
b568eba671262c47de32db1c09050151b25ce43c
[ "Unlicense" ]
null
null
null
net/minecraft/world/chunk/storage/RegionFile$ChunkBuffer.java
OdshDays/corrosion
b568eba671262c47de32db1c09050151b25ce43c
[ "Unlicense" ]
null
null
null
net/minecraft/world/chunk/storage/RegionFile$ChunkBuffer.java
OdshDays/corrosion
b568eba671262c47de32db1c09050151b25ce43c
[ "Unlicense" ]
null
null
null
49.666667
109
0.812081
7,533
public class RegionFile$ChunkBuffer { // Failed to decompile, took too long to decompile: net/minecraft/world/chunk/storage/RegionFile$ChunkBuffer }