language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
{ "start": 2153, "end": 17666 }
class ____ { private static final Logger LOG = LoggerFactory.getLogger(TestDFSHAAdmin.class); private DFSHAAdmin tool; private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream(); private final ByteArrayOutputStream outBytes = new ByteArrayOutputStream(); private String errOutput; private String output; private HAServiceProtocol mockProtocol; private ZKFCProtocol mockZkfcProtocol; private static final String NSID = "ns1"; private static final HAServiceStatus STANDBY_READY_RESULT = new HAServiceStatus(HAServiceState.STANDBY) .setReadyToBecomeActive(); private final ArgumentCaptor<StateChangeRequestInfo> reqInfoCaptor = ArgumentCaptor.forClass(StateChangeRequestInfo.class); private static final String HOST_A = "1.2.3.1"; private static final String HOST_B = "1.2.3.2"; // Fencer shell commands that always return true and false respectively // on Unix. private static final String FENCER_TRUE_COMMAND_UNIX = "shell(true)"; private static final String FENCER_FALSE_COMMAND_UNIX = "shell(false)"; // Fencer shell commands that always return true and false respectively // on Windows. Lacking POSIX 'true' and 'false' commands we use the DOS // commands 'rem' and 'help.exe'. private static final String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)"; private static final String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? 
>NUL)"; private HdfsConfiguration getHAConf() { HdfsConfiguration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID); conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID); conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2"); conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"), HOST_A + ":12345"); conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"), HOST_B + ":12345"); return conf; } public static String getFencerTrueCommand() { return Shell.WINDOWS ? FENCER_TRUE_COMMAND_WINDOWS : FENCER_TRUE_COMMAND_UNIX; } public static String getFencerFalseCommand() { return Shell.WINDOWS ? FENCER_FALSE_COMMAND_WINDOWS : FENCER_FALSE_COMMAND_UNIX; } @BeforeEach public void setup() throws IOException { mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class); mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class); tool = new DFSHAAdmin() { @Override protected HAServiceTarget resolveTarget(String nnId) { HAServiceTarget target = super.resolveTarget(nnId); HAServiceTarget spy = Mockito.spy(target); // OVerride the target to return our mock protocol try { Mockito.doReturn(mockProtocol).when(spy).getProxy( Mockito.<Configuration>any(), Mockito.anyInt()); Mockito.doReturn(mockZkfcProtocol).when(spy).getZKFCProxy( Mockito.<Configuration>any(), Mockito.anyInt()); } catch (IOException e) { throw new AssertionError(e); // mock setup doesn't really throw } return spy; } }; tool.setConf(getHAConf()); tool.setErrOut(new PrintStream(errOutBytes)); tool.setOut(new PrintStream(outBytes)); } private void assertOutputContains(String string) { if (!errOutput.contains(string) && !output.contains(string)) { fail("Expected output to contain '" + string + "' but err_output was:\n" + errOutput + "\n and output was: \n" + output); } } @Test public void testNameserviceOption() throws Exception { 
assertEquals(-1, runTool("-ns")); assertOutputContains("Missing nameservice ID"); assertEquals(-1, runTool("-ns", "ns1")); assertOutputContains("Missing command"); // "ns1" isn't defined but we check this lazily and help doesn't use the ns assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive")); assertOutputContains("Transitions the service into Active"); } @Test public void testNamenodeResolution() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(0, runTool("-getServiceState", "nn1")); Mockito.verify(mockProtocol).getServiceStatus(); assertEquals(-1, runTool("-getServiceState", "undefined")); assertOutputContains( "Unable to determine service address for namenode 'undefined'"); } @Test public void testHelp() throws Exception { assertEquals(0, runTool("-help")); assertEquals(0, runTool("-help", "transitionToActive")); assertOutputContains("Transitions the service into Active"); } @Test public void testGetAllServiceState() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol) .getServiceStatus(); assertEquals(0, runTool("-getAllServiceState")); assertOutputContains(String.format("%-50s %-10s", (HOST_A + ":" + 12345), STANDBY_READY_RESULT.getState())); assertOutputContains(String.format("%-50s %-10s", (HOST_B + ":" + 12345), STANDBY_READY_RESULT.getState())); } @Test public void testTransitionToActive() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(0, runTool("-transitionToActive", "nn1")); Mockito.verify(mockProtocol).transitionToActive( reqInfoCaptor.capture()); assertEquals(RequestSource.REQUEST_BY_USER, reqInfoCaptor.getValue().getSource()); } /** * Test that, if automatic HA is enabled, none of the mutative operations * will succeed, unless the -forcemanual flag is specified. 
* @throws Exception */ @Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); // Turn on auto-HA in the config HdfsConfiguration conf = getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); // Should fail without the forcemanual flag assertEquals(-1, runTool("-transitionToActive", "nn1")); assertTrue(errOutput.contains("Refusing to manually manage")); assertEquals(-1, runTool("-transitionToStandby", "nn1")); assertTrue(errOutput.contains("Refusing to manually manage")); assertEquals(-1, runTool("-transitionToObserver", "nn1")); assertTrue(errOutput.contains("Refusing to manually manage")); Mockito.verify(mockProtocol, Mockito.never()) .transitionToActive(anyReqInfo()); Mockito.verify(mockProtocol, Mockito.never()) .transitionToStandby(anyReqInfo()); Mockito.verify(mockProtocol, Mockito.never()) .transitionToObserver(anyReqInfo()); // Force flag should bypass the check and change the request source // for the RPC setupConfirmationOnSystemIn(); assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1")); setupConfirmationOnSystemIn(); assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1")); setupConfirmationOnSystemIn(); assertEquals(0, runTool("-transitionToObserver", "-forcemanual", "nn1")); Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive( reqInfoCaptor.capture()); Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby( reqInfoCaptor.capture()); Mockito.verify(mockProtocol, Mockito.times(1)).transitionToObserver( reqInfoCaptor.capture()); // All of the RPCs should have had the "force" source for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) { assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource()); } } /** * Setup System.in with a stream that feeds a "yes" answer 
on the * next prompt. */ private static void setupConfirmationOnSystemIn() { // Answer "yes" to the prompt about transition to active System.setIn(new ByteArrayInputStream("yes\n".getBytes())); } /** * Test that, even if automatic HA is enabled, the monitoring operations * still function correctly. */ @Test public void testMonitoringOperationsWithAutoHaEnabled() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); // Turn on auto-HA HdfsConfiguration conf = getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); tool.setConf(conf); assertEquals(0, runTool("-checkHealth", "nn1")); Mockito.verify(mockProtocol).monitorHealth(); assertEquals(0, runTool("-getServiceState", "nn1")); Mockito.verify(mockProtocol).getServiceStatus(); } @Test public void testTransitionToStandby() throws Exception { assertEquals(0, runTool("-transitionToStandby", "nn1")); Mockito.verify(mockProtocol).transitionToStandby(anyReqInfo()); } @Test public void testTransitionToObserver() throws Exception { assertEquals(0, runTool("-transitionToObserver", "nn1")); Mockito.verify(mockProtocol).transitionToObserver(anyReqInfo()); } @Test public void testFailoverWithNoFencerConfigured() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(-1, runTool("-failover", "nn1", "nn2")); } @Test public void testFailoverWithFencerConfigured() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2")); } @Test public void testFailoverWithFencerAndNameservice() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); 
tool.setConf(conf); assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2")); } @Test public void testFailoverWithFencerConfiguredAndForce() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); } @Test public void testFailoverWithForceActive() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive")); } @Test public void testFailoverWithInvalidFenceArg() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence")); } @Test public void testFailoverWithFenceButNoFencer() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); } @Test public void testFailoverWithFenceAndBadFencer() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!"); tool.setConf(conf); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); } @Test public void testFailoverWithAutoHa() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); // Turn on auto-HA in the config HdfsConfiguration conf = getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); 
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2")); Mockito.verify(mockZkfcProtocol).gracefulFailover(); } @Test public void testForceFenceOptionListedBeforeArgs() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); HdfsConfiguration conf = getHAConf(); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2")); } @Test public void testGetServiceStatus() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); assertEquals(0, runTool("-getServiceState", "nn1")); Mockito.verify(mockProtocol).getServiceStatus(); } @Test public void testCheckHealth() throws Exception { assertEquals(0, runTool("-checkHealth", "nn1")); Mockito.verify(mockProtocol).monitorHealth(); Mockito.doThrow(new HealthCheckFailedException("fake health check failure")) .when(mockProtocol).monitorHealth(); assertEquals(-1, runTool("-checkHealth", "nn1")); assertOutputContains("Health check failed: fake health check failure"); } /** * Test that the fencing configuration can be overridden per-nameservice * or per-namenode */ @Test public void testFencingConfigPerNameNode() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID; final String nnSpecificKey = nsSpecificKey + ".nn1"; HdfsConfiguration conf = getHAConf(); // Set the default fencer to succeed conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); // Set the NN-specific fencer to fail. Should fail to fence. 
conf.set(nnSpecificKey, getFencerFalseCommand()); tool.setConf(conf); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); conf.unset(nnSpecificKey); // Set an NS-specific fencer to fail. Should fail. conf.set(nsSpecificKey, getFencerFalseCommand()); tool.setConf(conf); assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence")); // Set the NS-specific fencer to succeed. Should succeed conf.set(nsSpecificKey, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); } private Object runTool(String ... args) throws Exception { errOutBytes.reset(); outBytes.reset(); LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args)); int ret = tool.run(args); errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8); output = new String(outBytes.toByteArray(), StandardCharsets.UTF_8); LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output); return ret; } private StateChangeRequestInfo anyReqInfo() { return Mockito.any(); } }
TestDFSHAAdmin
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/node/TreeWithTypeTest.java
{ "start": 547, "end": 724 }
class ____ { public String bar; public Foo() { } public Foo(String bar) { this.bar = bar; } } // [databind#353] public
Foo
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/dialect/function/json/CastTargetReturnTypeResolver.java
{ "start": 1280, "end": 2871 }
class ____ implements FunctionReturnTypeResolver { private final BasicType<?> defaultType; public CastTargetReturnTypeResolver(TypeConfiguration typeConfiguration) { this.defaultType = typeConfiguration.getBasicTypeForJavaType( String.class ); } @Override public ReturnableType<?> resolveFunctionReturnType( ReturnableType<?> impliedType, @Nullable SqmToSqlAstConverter converter, List<? extends SqmTypedNode<?>> arguments, TypeConfiguration typeConfiguration) { if ( arguments.size() > 2 ) { int castTargetIndex = 0; for ( int i = 2; i < arguments.size(); i++ ) { if (arguments.get( i ) instanceof SqmCastTarget<?> ) { castTargetIndex = i + 1; break; } } if ( castTargetIndex != 0 ) { ReturnableType<?> argType = extractArgumentType( arguments, castTargetIndex ); return isAssignableTo( argType, impliedType ) ? impliedType : argType; } } return defaultType; } @Override public BasicValuedMapping resolveFunctionReturnType( Supplier<BasicValuedMapping> impliedTypeAccess, List<? extends SqlAstNode> arguments) { if ( arguments.size() > 2 ) { int castTargetIndex = 0; for ( int i = 2; i < arguments.size(); i++ ) { if (arguments.get( i ) instanceof CastTarget ) { castTargetIndex = i + 1; break; } } if ( castTargetIndex != 0 ) { final BasicValuedMapping specifiedArgType = extractArgumentValuedMapping( arguments, castTargetIndex ); return useImpliedTypeIfPossible( specifiedArgType, impliedTypeAccess.get() ); } } return defaultType; } }
CastTargetReturnTypeResolver
java
elastic__elasticsearch
x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcCsvSpecIT.java
{ "start": 521, "end": 1188 }
class ____ extends CsvSpecTestCase { public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); } @Override protected Settings restClientSettings() { return RestSqlIT.securitySettings(); } @Override protected String getProtocol() { return RestSqlIT.SSL_ENABLED ? "https" : "http"; } @Override protected Properties connectionProperties() { Properties sp = super.connectionProperties(); sp.putAll(JdbcSecurityIT.adminProperties()); return sp; } }
JdbcCsvSpecIT
java
spring-projects__spring-boot
documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/messaging/pulsar/sending/MyBean.java
{ "start": 814, "end": 1069 }
class ____ { private final PulsarTemplate<String> pulsarTemplate; public MyBean(PulsarTemplate<String> pulsarTemplate) { this.pulsarTemplate = pulsarTemplate; } public void someMethod() { this.pulsarTemplate.send("someTopic", "Hello"); } }
MyBean
java
elastic__elasticsearch
x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/RetentionPolicyToDeleteByQueryRequestConverter.java
{ "start": 1276, "end": 1520 }
class ____ { private static final String DATE_FORMAT = "strict_date_optional_time"; private static final DateFormatter DATE_FORMATER = DateFormatter.forPattern(DATE_FORMAT); public static
RetentionPolicyToDeleteByQueryRequestConverter
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/custom/basic/IMyList.java
{ "start": 191, "end": 232 }
interface ____<X> extends List<X> { }
IMyList
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/connections/CurrentSessionConnectionTest.java
{ "start": 455, "end": 835 }
class ____ extends AggressiveReleaseTest { @Override protected Session getSessionUnderTest(SessionFactoryScope scope) { return scope.getSessionFactory().getCurrentSession(); } @Override protected void release(Session session, SessionFactoryScope scope) { // do nothing, txn synch should release session as part of current-session definition } }
CurrentSessionConnectionTest
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
{ "start": 14440, "end": 14922 }
class ____ classMatcher = tcClassPattern.matcher(line); if (classMatcher.matches()) { int classId = Integer.parseInt(classMatcher.group(1)); if (classId >= MIN_CONTAINER_CLASS_ID) { currentClassId = classId; continue; } } //Check if we encountered a stats line Matcher bytesMatcher = bytesPattern.matcher(line); if (bytesMatcher.matches()) { //we found at least one
Matcher
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
{ "start": 8010, "end": 13395 }
class ____ extends Socket { private final Socket wrapped; private boolean tcpNoDelay; public SocketWrapper(Socket socket) { this.wrapped = socket; } // Override methods, check whether tcpnodelay has been set for each socket // created. This isn't perfect, as we could still send before tcpnodelay // is set, but should at least trigger when tcpnodelay is never set at all. @Override public void connect(SocketAddress endpoint) throws IOException { wrapped.connect(endpoint); } @Override public void connect(SocketAddress endpoint, int timeout) throws IOException { wrapped.connect(endpoint, timeout); } @Override public void bind(SocketAddress bindpoint) throws IOException { wrapped.bind(bindpoint); } @Override public InetAddress getInetAddress() { return wrapped.getInetAddress(); } @Override public InetAddress getLocalAddress() { return wrapped.getLocalAddress(); } @Override public int getPort() { return wrapped.getPort(); } @Override public int getLocalPort() { return wrapped.getLocalPort(); } @Override public SocketAddress getRemoteSocketAddress() { return wrapped.getRemoteSocketAddress(); } @Override public SocketAddress getLocalSocketAddress() { return wrapped.getLocalSocketAddress(); } @Override public SocketChannel getChannel() { return wrapped.getChannel(); } @Override public InputStream getInputStream() throws IOException { return wrapped.getInputStream(); } @Override public OutputStream getOutputStream() throws IOException { return wrapped.getOutputStream(); } @Override public void setTcpNoDelay(boolean on) throws SocketException { wrapped.setTcpNoDelay(on); this.tcpNoDelay = on; } @Override public boolean getTcpNoDelay() throws SocketException { return wrapped.getTcpNoDelay(); } @Override public void setSoLinger(boolean on, int linger) throws SocketException { wrapped.setSoLinger(on, linger); } @Override public int getSoLinger() throws SocketException { return wrapped.getSoLinger(); } @Override public void sendUrgentData(int data) throws IOException { 
wrapped.sendUrgentData(data); } @Override public void setOOBInline(boolean on) throws SocketException { wrapped.setOOBInline(on); } @Override public boolean getOOBInline() throws SocketException { return wrapped.getOOBInline(); } @Override public synchronized void setSoTimeout(int timeout) throws SocketException { wrapped.setSoTimeout(timeout); } @Override public synchronized int getSoTimeout() throws SocketException { return wrapped.getSoTimeout(); } @Override public synchronized void setSendBufferSize(int size) throws SocketException { wrapped.setSendBufferSize(size); } @Override public synchronized int getSendBufferSize() throws SocketException { return wrapped.getSendBufferSize(); } @Override public synchronized void setReceiveBufferSize(int size) throws SocketException { wrapped.setReceiveBufferSize(size); } @Override public synchronized int getReceiveBufferSize() throws SocketException { return wrapped.getReceiveBufferSize(); } @Override public void setKeepAlive(boolean on) throws SocketException { wrapped.setKeepAlive(on); } @Override public boolean getKeepAlive() throws SocketException { return wrapped.getKeepAlive(); } @Override public void setTrafficClass(int tc) throws SocketException { wrapped.setTrafficClass(tc); } @Override public int getTrafficClass() throws SocketException { return wrapped.getTrafficClass(); } @Override public void setReuseAddress(boolean on) throws SocketException { wrapped.setReuseAddress(on); } @Override public boolean getReuseAddress() throws SocketException { return wrapped.getReuseAddress(); } @Override public synchronized void close() throws IOException { wrapped.close(); } @Override public void shutdownInput() throws IOException { wrapped.shutdownInput(); } @Override public void shutdownOutput() throws IOException { wrapped.shutdownOutput(); } @Override public String toString() { return wrapped.toString(); } @Override public boolean isConnected() { return wrapped.isConnected(); } @Override public boolean isBound() { return 
wrapped.isBound(); } @Override public boolean isClosed() { return wrapped.isClosed(); } @Override public boolean isInputShutdown() { return wrapped.isInputShutdown(); } @Override public boolean isOutputShutdown() { return wrapped.isOutputShutdown(); } @Override public void setPerformancePreferences(int connectionTime, int latency, int bandwidth) { wrapped.setPerformancePreferences(connectionTime, latency, bandwidth); } public boolean getLastTcpNoDelay() { return tcpNoDelay; } } }
SocketWrapper
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/CachedQueryShallowTest.java
{ "start": 1676, "end": 6544 }
class ____ { public final static String HQL = "select e from Employee e"; @BeforeEach public void setUp(EntityManagerFactoryScope scope) { scope.inTransaction( em -> { for ( int i = 0; i < 10; i++ ) { Manager manager = new Manager( i, "Manager" + i ); for ( int j = 0; j < 1; j++ ) { manager.addAssociate( new Employee( i * 10 + j, "John" + ( i * 10 + j ) ) ); } em.persist( manager ); } } ); Statistics stats = getStatistics( scope ); assertEquals( 0, stats.getQueryCacheHitCount() ); assertEquals( 0, stats.getQueryCacheMissCount() ); assertEquals( 0, stats.getQueryCachePutCount() ); assertEquals( 0, stats.getSecondLevelCacheHitCount() ); assertEquals( 0, stats.getSecondLevelCacheMissCount() ); assertEquals( 10, stats.getSecondLevelCachePutCount() ); } @AfterEach public void tearDown(EntityManagerFactoryScope scope) { scope.getEntityManagerFactory().getSchemaManager().truncate(); } @Test public void testCacheableQuery(EntityManagerFactoryScope scope) { Statistics stats = getStatistics( scope ); stats.clear(); // First time the query is executed, query and results are cached. 
scope.inTransaction( em -> { List<Employee> employees = getEmployees( em ); assertThatAnSQLQueryHasBeenExecuted( stats ); assertEquals( 0, stats.getQueryCacheHitCount() ); assertEquals( 1, stats.getQueryCacheMissCount() ); assertEquals( 1, stats.getQueryCachePutCount() ); assertEquals( 0, stats.getSecondLevelCacheHitCount() ); assertEquals( 0, stats.getSecondLevelCacheMissCount() ); assertEquals( 0, stats.getSecondLevelCachePutCount() ); } ); stats.clear(); // Second time the query is executed, list of entities are read from query cache scope.inTransaction( em -> { List<Employee> employees = getEmployees( em ); assertThatNoSQLQueryHasBeenExecuted( stats ); assertEquals( 1, stats.getQueryCacheHitCount() ); assertEquals( 0, stats.getQueryCacheMissCount() ); assertEquals( 0, stats.getQueryCachePutCount() ); assertEquals( 10, stats.getSecondLevelCacheHitCount() ); assertEquals( 0, stats.getSecondLevelCacheMissCount() ); assertEquals( 0, stats.getSecondLevelCachePutCount() ); } ); // NOTE: JPACache.evictAll() only evicts entity regions; // it does not evict the collection regions or query cache region scope.getEntityManagerFactory().getCache().evictAll(); stats.clear(); scope.inTransaction( em -> { List<Employee> employees = getEmployees( em ); // query is still found in the cache assertThatNoSQLQueryHasBeenExecuted( stats ); assertEquals( 1, stats.getQueryCacheHitCount() ); assertEquals( 0, stats.getQueryCacheMissCount() ); assertEquals( 0, stats.getQueryCachePutCount() ); assertEquals( 0, stats.getSecondLevelCacheHitCount() ); assertEquals( 10, stats.getSecondLevelCacheMissCount() ); assertEquals( 10, stats.getSecondLevelCachePutCount() ); } ); stats.clear(); // this time call clear the entity regions and the query cache region scope.inTransaction( em -> { em.getEntityManagerFactory().getCache().evictAll(); em.unwrap( SessionImplementor.class ) .getFactory() .getCache() .evictQueryRegions(); List<Employee> employees = getEmployees( em ); // query is no longer found in 
the cache assertThatAnSQLQueryHasBeenExecuted( stats ); assertEquals( 0, stats.getQueryCacheHitCount() ); assertEquals( 1, stats.getQueryCacheMissCount() ); assertEquals( 1, stats.getQueryCachePutCount() ); assertEquals( 0, stats.getSecondLevelCacheHitCount() ); assertEquals( 0, stats.getSecondLevelCacheMissCount() ); assertEquals( 10, stats.getSecondLevelCachePutCount() ); } ); } private static Statistics getStatistics(EntityManagerFactoryScope scope) { return ( (SessionFactoryImplementor) scope.getEntityManagerFactory() ).getStatistics(); } private static List<Employee> getEmployees(EntityManager em) { TypedQuery<Employee> query = em.createQuery( HQL, Employee.class ) .setHint( HINT_CACHEABLE, true ); List<Employee> employees = query.getResultList(); assertEquals( 10, employees.size() ); for ( Employee employee : employees ) { assertEquals( "John" + employee.getId(), employee.getName() ); } return employees; } private static void assertThatAnSQLQueryHasBeenExecuted(Statistics stats) { assertEquals( 1, stats.getQueryStatistics( HQL ).getExecutionCount() ); } private static void assertThatNoSQLQueryHasBeenExecuted(Statistics stats) { assertEquals( 0, stats.getQueryStatistics( HQL ).getExecutionCount() ); } @Entity(name = "Employee") @Cache(usage = CacheConcurrencyStrategy.READ_WRITE) public static
CachedQueryShallowTest
java
spring-projects__spring-security
core/src/main/java/org/springframework/security/core/context/DeferredSecurityContext.java
{ "start": 725, "end": 873 }
interface ____ allows delayed access to a {@link SecurityContext} that may be * generated. * * @author Steve Riesenberg * @since 5.8 */ public
that
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/query/hql/internal/BasicDotIdentifierConsumer.java
{ "start": 1752, "end": 1868 }
enum ____ references, e.g. {@code Sex.MALE} * <li>navigable-path * </ul> * * @author Steve Ebersole */ public
value
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/RocketMQEndpointBuilderFactory.java
{ "start": 42866, "end": 43193 }
class ____ extends AbstractEndpointBuilder implements RocketMQEndpointBuilder, AdvancedRocketMQEndpointBuilder { public RocketMQEndpointBuilderImpl(String path) { super(componentName, path); } } return new RocketMQEndpointBuilderImpl(path); } }
RocketMQEndpointBuilderImpl
java
mapstruct__mapstruct
processor/src/main/java/org/mapstruct/ap/internal/model/presence/AllPresenceChecksPresenceCheck.java
{ "start": 585, "end": 1814 }
class ____ extends ModelElement implements PresenceCheck { private final Collection<PresenceCheck> presenceChecks; public AllPresenceChecksPresenceCheck(Collection<PresenceCheck> presenceChecks) { this.presenceChecks = presenceChecks; } public Collection<PresenceCheck> getPresenceChecks() { return presenceChecks; } @Override public PresenceCheck negate() { return new NegatePresenceCheck( this ); } @Override public Set<Type> getImportTypes() { Set<Type> importTypes = new HashSet<>(); for ( PresenceCheck presenceCheck : presenceChecks ) { importTypes.addAll( presenceCheck.getImportTypes() ); } return importTypes; } @Override public boolean equals(Object o) { if ( this == o ) { return true; } if ( o == null || getClass() != o.getClass() ) { return false; } AllPresenceChecksPresenceCheck that = (AllPresenceChecksPresenceCheck) o; return Objects.equals( presenceChecks, that.presenceChecks ); } @Override public int hashCode() { return Objects.hash( presenceChecks ); } }
AllPresenceChecksPresenceCheck
java
alibaba__nacos
core/src/main/java/com/alibaba/nacos/core/paramcheck/ExtractorManager.java
{ "start": 1726, "end": 2439 }
interface ____ { /** * Configure a Class to locate a specific Extractor, which takes effect only on the @Controller annotated class * or method. * * @return Class<? extends AbstractHttpParamExtractor> */ Class<? extends AbstractHttpParamExtractor> httpExtractor() default DefaultHttpExtractor.class; /** * Configure a Class to locate a specific Extractor, which takes effect only on grpcHandler. * * @return Class<? extends AbstractRpcParamExtractor> */ Class<? extends AbstractRpcParamExtractor> rpcExtractor() default DefaultGrpcExtractor.class; } public static
Extractor
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateIndexTest17_global.java
{ "start": 967, "end": 2513 }
class ____ extends OracleTest { public void test_0() throws Exception { String sql = // "CREATE INDEX dbobjs_idx ON dbobjs (created) global"; List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE); SQLStatement stmt = statementList.get(0); print(statementList); assertEquals(1, statementList.size()); SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE); stmt.accept(visitor); System.out.println("Tables : " + visitor.getTables()); System.out.println("fields : " + visitor.getColumns()); System.out.println("coditions : " + visitor.getConditions()); System.out.println("relationships : " + visitor.getRelationships()); System.out.println("orderBy : " + visitor.getOrderByColumns()); assertEquals("CREATE INDEX dbobjs_idx ON dbobjs(created) GLOBAL", SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE)); assertEquals(1, visitor.getTables().size()); assertTrue(visitor.getTables().containsKey(new TableStat.Name("dbobjs"))); assertEquals(1, visitor.getColumns().size()); // assertTrue(visitor.getColumns().contains(new TableStat.Column("xwarehouses", "sales_rep_id"))); // assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR"))); // assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode"))); } }
OracleCreateIndexTest17_global
java
quarkusio__quarkus
independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/matching/ResourceClassMergeTest.java
{ "start": 665, "end": 2239 }
class ____ { @RegisterExtension static ResteasyReactiveUnitTest test = new ResteasyReactiveUnitTest() .setArchiveProducer(new Supplier<>() { @Override public JavaArchive get() { return ShrinkWrap.create(JavaArchive.class) .addClasses(MatchDefaultRegexDifferentNameResourceA.class, MatchDefaultRegexDifferentNameResourceB.class, MatchCustomRegexDifferentNameResourceA.class, MatchCustomRegexDifferentNameResourceB.class); } }); @Test public void testCallMatchDefaultRegexDifferentNameResource() { given() .when().get("routing-broken/abc/some/other/path") .then() .statusCode(200) .body(is("abc")); given() .when().get("routing-broken/efg/some/path") .then() .statusCode(200) .body(is("efg")); } @Test public void testCallMatchCustomRegexDifferentNameResource() { given() .when().get("routing-broken-custom-regex/abc/some/other/path") .then() .statusCode(200) .body(is("abc")); given() .when().get("routing-broken-custom-regex/efg/some/path") .then() .statusCode(200) .body(is("efg")); } @Path("/routing-broken/{id1}") public static
ResourceClassMergeTest
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/security/token/hadoop/HadoopFSDelegationTokenProviderITCase.java
{ "start": 1856, "end": 7726 }
class ____ extends Token<TestHadoopDelegationTokenIdentifier> { private long newExpiration; public TestDelegationToken( Text tokenService, TestHadoopDelegationTokenIdentifier identifier, long newExpiration) { super(identifier.getBytes(), new byte[4], identifier.getKind(), tokenService); this.newExpiration = newExpiration; } public TestDelegationToken( Text tokenService, TestHadoopDelegationTokenIdentifier identifier) { this(tokenService, identifier, 0L); } @Override public long renew(Configuration conf) { return newExpiration; } } @Test public void getRenewerShouldReturnNullByDefault() throws Exception { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); provider.init(new org.apache.flink.configuration.Configuration()); assertNull(provider.getRenewer()); } @Test public void getRenewerShouldReturnConfiguredRenewer() throws Exception { String renewer = "testRenewer"; HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); org.apache.flink.configuration.Configuration configuration = new org.apache.flink.configuration.Configuration(); configuration.setString("security.kerberos.token.provider.hadoopfs.renewer", renewer); provider.init(configuration); assertEquals(renewer, provider.getRenewer()); } @Test public void getTokenRenewalIntervalShouldReturnNoneWhenNoTokens() throws IOException { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider() { @Override protected void obtainDelegationTokens( String renewer, Set<FileSystem> fileSystemsToAccess, Credentials credentials) {} }; Clock constantClock = Clock.fixed(ofEpochMilli(0), ZoneId.systemDefault()); assertEquals( Optional.empty(), provider.getTokenRenewalInterval(constantClock, Collections.emptySet())); } @Test public void getTokenRenewalIntervalShouldReturnMinWhenMultipleTokens() throws IOException { Clock constantClock = Clock.fixed(ofEpochMilli(NOW), ZoneId.systemDefault()); HadoopFSDelegationTokenProvider provider = new 
HadoopFSDelegationTokenProvider() { @Override protected void obtainDelegationTokens( String renewer, Set<FileSystem> fileSystemsToAccess, Credentials credentials) { TestHadoopDelegationTokenIdentifier tokenIdentifier1 = new TestHadoopDelegationTokenIdentifier(NOW); credentials.addToken( tokenService1, new TestDelegationToken(tokenService1, tokenIdentifier1, NOW + 1)); TestHadoopDelegationTokenIdentifier tokenIdentifier2 = new TestHadoopDelegationTokenIdentifier(NOW); credentials.addToken( tokenService2, new TestDelegationToken(tokenService2, tokenIdentifier2, NOW + 2)); } }; assertEquals( Optional.of(1L), provider.getTokenRenewalInterval(constantClock, Collections.emptySet())); } @Test public void getTokenRenewalDateShouldReturnNoneWhenNegativeRenewalInterval() { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); Clock constantClock = Clock.fixed(ofEpochMilli(0), ZoneId.systemDefault()); Credentials credentials = new Credentials(); assertEquals( Optional.empty(), provider.getTokenRenewalDate(constantClock, credentials, -1)); } @Test public void getTokenRenewalDateShouldReturnNoneWhenNoTokens() { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); Clock constantClock = Clock.fixed(ofEpochMilli(0), ZoneId.systemDefault()); Credentials credentials = new Credentials(); assertEquals(Optional.empty(), provider.getTokenRenewalDate(constantClock, credentials, 1)); } @Test public void getTokenRenewalDateShouldReturnMinWhenMultipleTokens() { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); Clock constantClock = Clock.fixed(ofEpochMilli(NOW), ZoneId.systemDefault()); Credentials credentials = new Credentials(); TestHadoopDelegationTokenIdentifier tokenIdentifier1 = new TestHadoopDelegationTokenIdentifier(NOW); credentials.addToken( tokenService1, new TestDelegationToken(tokenService1, tokenIdentifier1)); TestHadoopDelegationTokenIdentifier tokenIdentifier2 = new 
TestHadoopDelegationTokenIdentifier(NOW + 1); credentials.addToken( tokenService2, new TestDelegationToken(tokenService2, tokenIdentifier2)); assertEquals( Optional.of(NOW + 1), provider.getTokenRenewalDate(constantClock, credentials, 1)); } @Test public void obtainDelegationTokenWithStandaloneDeployment() throws Exception { HadoopFSDelegationTokenProvider provider = new HadoopFSDelegationTokenProvider(); provider.init(new org.apache.flink.configuration.Configuration()); assertNotNull(provider.obtainDelegationTokens()); } }
TestDelegationToken
java
apache__camel
components/camel-knative/camel-knative-api/src/main/java/org/apache/camel/component/knative/spi/KnativeEnvironment.java
{ "start": 7188, "end": 9378 }
class ____ { private final Knative.Type type; private final String name; private Knative.EndpointKind endpointKind; private String url; private Map<String, String> metadata; public KnativeServiceBuilder(Knative.Type type, String name) { this.type = type; this.name = name; } public KnativeServiceBuilder withUrl(String url) { this.url = url; return this; } public KnativeServiceBuilder withUrlf(String format, Object... args) { return withUrl(String.format(format, args)); } public KnativeServiceBuilder withEndpointKind(Knative.EndpointKind endpointKind) { this.endpointKind = endpointKind; return this; } public KnativeServiceBuilder withMeta(Map<String, String> metadata) { if (metadata == null) { return this; } if (this.metadata == null) { this.metadata = new HashMap<>(); } this.metadata.putAll(metadata); return this; } public KnativeServiceBuilder withMeta(String key, String value) { if (key == null || value == null) { return this; } if (this.metadata == null) { this.metadata = new HashMap<>(); } this.metadata.put(key, value); return this; } public KnativeServiceBuilder withMeta(String key, Enum<?> e) { if (key == null || e == null) { return this; } if (this.metadata == null) { this.metadata = new HashMap<>(); } this.metadata.put(key, e.name()); return this; } public KnativeResource build() { KnativeResource answer = new KnativeResource(); answer.setType(type); answer.setEndpointKind(endpointKind); answer.setName(name); answer.setUrl(url); answer.setMetadata(metadata); return answer; } } }
KnativeServiceBuilder
java
apache__camel
components/camel-mongodb/src/test/java/org/apache/camel/component/mongodb/processor/idempotent/integration/MongoDbIdempotentRepositoryIT.java
{ "start": 1408, "end": 4104 }
class ____ extends AbstractMongoDbITSupport { MongoDbIdempotentRepository repo; @BeforeEach @AfterEach public void clearDB() { testCollection.deleteMany(new Document()); } @BeforeEach public void setupIdempotentRepository() { repo = new MongoDbIdempotentRepository(mongo, testCollectionName, dbName); repo.start(); } @Test public void add() { String randomUUIDString = UUID.randomUUID().toString(); boolean added = repo.add(randomUUIDString); assertEquals(1, testCollection.countDocuments(), "Driver inserted document"); assertTrue(added, "Add ui returned true"); } @Test public void addAndContains() { String randomUUIDString = UUID.randomUUID().toString(); repo.add(randomUUIDString); assertEquals(1, testCollection.countDocuments()); boolean found = repo.contains(randomUUIDString); assertTrue(found, "Added uid was found"); } @Test public void addAndRemove() { String randomUUIDString = UUID.randomUUID().toString(); repo.add(randomUUIDString); assertEquals(1, testCollection.countDocuments()); boolean removed = repo.remove(randomUUIDString); assertTrue(removed, "Added uid was removed correctly"); assertEquals(0, testCollection.countDocuments()); } @Test public void addDuplicatedFails() { String randomUUIDString = UUID.randomUUID().toString(); repo.add(randomUUIDString); assertEquals(1, testCollection.countDocuments()); boolean added = repo.add(randomUUIDString); assertFalse(added, "Duplicated entry was not added"); assertEquals(1, testCollection.countDocuments()); } @Test public void deleteMissingiIsFailse() { String randomUUIDString = UUID.randomUUID().toString(); assertEquals(0, testCollection.countDocuments()); boolean removed = repo.remove(randomUUIDString); assertFalse(removed, "Non exisint uid returns false"); } @Test public void containsMissingReturnsFalse() { String randomUUIDString = UUID.randomUUID().toString(); boolean found = repo.contains(randomUUIDString); assertFalse(found, "Non existing item is not found"); } @Test public void confirmAllwaysReturnsTrue() { 
String randomUUIDString = UUID.randomUUID().toString(); boolean found = repo.confirm(randomUUIDString); assertTrue(found, "Confirm always returns true"); found = repo.confirm(null); assertTrue(found, "Confirm always returns true, even with null"); } }
MongoDbIdempotentRepositoryIT
java
apache__camel
core/camel-api/src/main/java/org/apache/camel/spi/BeanIntrospection.java
{ "start": 1697, "end": 8012 }
class ____ { public Method method; public Boolean isGetter; public Boolean isSetter; public String getterOrSetterShorthandName; public Boolean hasGetterAndSetter; } // Statistics // ---------------------------------------------------- /** * Number of times bean introspection has been invoked */ long getInvokedCounter(); /** * Reset the statistics counters. */ void resetCounters(); /** * Whether to gather extended statistics for introspection usage. */ boolean isExtendedStatistics(); /** * Whether to gather extended statistics for introspection usage. */ void setExtendedStatistics(boolean extendedStatistics); /** * Logging level used for logging introspection usage. Is using TRACE level as default. */ LoggingLevel getLoggingLevel(); /** * Logging level used for logging introspection usage. Is using TRACE level as default. */ void setLoggingLevel(LoggingLevel loggingLevel); // Introspection // ---------------------------------------------------- /** * Will inspect the target for properties. * <p/> * Notice a property must have both a getter/setter method to be included. Notice all <tt>null</tt> values will be * included. * * @param target the target bean * @param properties the map to fill in found properties * @param optionPrefix an optional prefix to append the property key * @return <tt>true</tt> if any properties was found, <tt>false</tt> otherwise. */ boolean getProperties(Object target, Map<String, Object> properties, String optionPrefix); /** * Will inspect the target for properties. * <p/> * Notice a property must have both a getter/setter method to be included. * * @param target the target bean * @param properties the map to fill in found properties * @param optionPrefix an optional prefix to append the property key * @param includeNull whether to include <tt>null</tt> values * @return <tt>true</tt> if any properties was found, <tt>false</tt> otherwise. 
*/ boolean getProperties(Object target, Map<String, Object> properties, String optionPrefix, boolean includeNull); /** * Introspects the given class. * * @param clazz the class * @return the introspection result as a {@link ClassInfo} structure. */ ClassInfo cacheClass(Class<?> clazz); /** * Clears the introspection cache. */ void clearCache(); /** * Number of classes in the introspection cache. */ long getCachedClassesCounter(); /** * Gets the property or else returning the default value. * * @param target the target bean * @param propertyName the property name * @param defaultValue the default value * @param ignoreCase whether to ignore case for matching the property name * @return the property value, or the default value if the target does not have a property with the * given name */ Object getOrElseProperty(Object target, String propertyName, Object defaultValue, boolean ignoreCase); /** * Gets the getter method for the property. * * @param type the target class * @param propertyName the property name * @param ignoreCase whether to ignore case for matching the property name * @return the getter method * @throws NoSuchMethodException is thrown if there are no getter method for the property */ Method getPropertyGetter(Class<?> type, String propertyName, boolean ignoreCase) throws NoSuchMethodException; /** * Gets the setter method for the property. * * @param type the target class * @param propertyName the property name * @return the setter method * @throws NoSuchMethodException is thrown if there are no setter method for the property */ Method getPropertySetter(Class<?> type, String propertyName) throws NoSuchMethodException; /** * This method supports three modes to set a property: * * 1. Setting a Map property where the property name refers to a map via name[aKey] where aKey is the map key to * use. * * 2. Setting a property that has already been resolved, this is the case when {@code context} and {@code refName} * are NULL and {@code value} is non-NULL. 
* * 3. Setting a property that has not yet been resolved, the property will be resolved based on the suitable methods * found matching the property name on the {@code target} bean. For this mode to be triggered the parameters * {@code context} and {@code refName} must NOT be NULL, and {@code value} MUST be NULL. */ boolean setProperty(CamelContext context, Object target, String name, Object value) throws Exception; /** * This method supports three modes to set a property: * * 1. Setting a Map property where the property name refers to a map via name[aKey] where aKey is the map key to * use. * * 2. Setting a property that has already been resolved, this is the case when {@code context} and {@code refName} * are NULL and {@code value} is non-NULL. * * 3. Setting a property that has not yet been resolved, the property will be resolved based on the suitable methods * found matching the property name on the {@code target} bean. For this mode to be triggered the parameters * {@code context} and {@code refName} must NOT be NULL, and {@code value} MUST be NULL. */ boolean setProperty( CamelContext context, TypeConverter typeConverter, Object target, String name, Object value, String refName, boolean allowBuilderPattern, boolean allowPrivateSetter, boolean ignoreCase) throws Exception; /** * Find all the setter methods on the class */ Set<Method> findSetterMethods( Class<?> clazz, String name, boolean allowBuilderPattern, boolean allowPrivateSetter, boolean ignoreCase); }
MethodInfo
java
quarkusio__quarkus
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/multipart/MultipartParser.java
{ "start": 13557, "end": 13822 }
class ____ implements Encoding { @Override public void handle(final PartHandler handler, final ByteBuffer rawData) throws IOException { handler.data(rawData); rawData.clear(); } } private static
IdentityEncoding
java
spring-projects__spring-framework
spring-beans/src/test/java/org/springframework/beans/factory/DefaultListableBeanFactoryTests.java
{ "start": 143243, "end": 143559 }
class ____ { private FactoryBean<?> factoryBean; public FactoryBean<?> getFactoryBean() { return this.factoryBean; } public void setFactoryBean(FactoryBean<?> factoryBean) { this.factoryBean = factoryBean; } } @SuppressWarnings({ "unchecked", "rawtypes" }) private static
FactoryBeanDependentBean
java
spring-projects__spring-boot
module/spring-boot-flyway/src/main/java/org/springframework/boot/flyway/autoconfigure/FlywayConnectionDetails.java
{ "start": 1735, "end": 1866 }
class ____ of the driver * specified in the JDBC URL or {@code null} when no JDBC URL is configured. * @return the JDBC driver
name
java
apache__rocketmq
remoting/src/test/java/org/apache/rocketmq/remoting/netty/MockChannelPromise.java
{ "start": 1198, "end": 4537 }
class ____ implements ChannelPromise { protected Channel channel; public MockChannelPromise(Channel channel) { this.channel = channel; } @Override public Channel channel() { return channel; } @Override public ChannelPromise setSuccess(Void result) { return this; } @Override public ChannelPromise setSuccess() { return this; } @Override public boolean trySuccess() { return false; } @Override public ChannelPromise setFailure(Throwable cause) { return this; } @Override public ChannelPromise addListener(GenericFutureListener<? extends Future<? super Void>> listener) { return this; } @Override public ChannelPromise addListeners(GenericFutureListener<? extends Future<? super Void>>... listeners) { return this; } @Override public ChannelPromise removeListener(GenericFutureListener<? extends Future<? super Void>> listener) { return this; } @Override public ChannelPromise removeListeners(GenericFutureListener<? extends Future<? super Void>>... listeners) { return this; } @Override public ChannelPromise sync() throws InterruptedException { return this; } @Override public ChannelPromise syncUninterruptibly() { return this; } @Override public ChannelPromise await() throws InterruptedException { return this; } @Override public ChannelPromise awaitUninterruptibly() { return this; } @Override public ChannelPromise unvoid() { return this; } @Override public boolean isVoid() { return false; } @Override public boolean trySuccess(Void result) { return false; } @Override public boolean tryFailure(Throwable cause) { return false; } @Override public boolean setUncancellable() { return false; } @Override public boolean isSuccess() { return false; } @Override public boolean isCancellable() { return false; } @Override public Throwable cause() { return null; } @Override public boolean await(long timeout, TimeUnit unit) throws InterruptedException { return false; } @Override public boolean await(long timeoutMillis) throws InterruptedException { return false; } @Override public boolean 
awaitUninterruptibly(long timeout, TimeUnit unit) { return false; } @Override public boolean awaitUninterruptibly(long timeoutMillis) { return false; } @Override public Void getNow() { return null; } @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return false; } @Override public Void get() throws InterruptedException, ExecutionException { return null; } @Override public Void get(long timeout, @NotNull java.util.concurrent.TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return null; } }
MockChannelPromise
java
micronaut-projects__micronaut-core
core-processor/src/main/java/io/micronaut/inject/annotation/internal/JavaxPersistenceContextAnnotationMapper.java
{ "start": 1191, "end": 2049 }
class ____ implements NamedAnnotationMapper { private static final String SOURCE_ANNOTATION = "javax.persistence.PersistenceContext"; @Override public String getName() { return SOURCE_ANNOTATION; } @Override public List<AnnotationValue<?>> map(AnnotationValue<Annotation> annotation, VisitorContext visitorContext) { final String name = annotation.stringValue("name").orElse(null); if (name != null) { return Arrays.asList( AnnotationValue.builder(AnnotationUtil.INJECT).build(), AnnotationValue.builder(AnnotationUtil.NAMED).value(name).build() ); } else { return Collections.singletonList( AnnotationValue.builder(AnnotationUtil.INJECT).build() ); } } }
JavaxPersistenceContextAnnotationMapper
java
spring-projects__spring-framework
spring-context/src/test/java/org/springframework/context/annotation/Spr12278Tests.java
{ "start": 983, "end": 2002 }
class ____ { private AnnotationConfigApplicationContext context; @AfterEach void close() { if (context != null) { context.close(); } } @Test void componentSingleConstructor() { this.context = new AnnotationConfigApplicationContext(BaseConfiguration.class, SingleConstructorComponent.class); assertThat(this.context.getBean(SingleConstructorComponent.class).autowiredName).isEqualTo("foo"); } @Test void componentTwoConstructorsNoHint() { this.context = new AnnotationConfigApplicationContext(BaseConfiguration.class, TwoConstructorsComponent.class); assertThat(this.context.getBean(TwoConstructorsComponent.class).name).isEqualTo("fallback"); } @Test void componentTwoSpecificConstructorsNoHint() { assertThatExceptionOfType(BeanCreationException.class).isThrownBy(() -> new AnnotationConfigApplicationContext(BaseConfiguration.class, TwoSpecificConstructorsComponent.class)) .withMessageContaining("No default constructor found"); } @Configuration static
Spr12278Tests
java
mockito__mockito
mockito-core/src/test/java/org/mockito/exceptions/stacktrace/StackTraceCleanerTest.java
{ "start": 330, "end": 2073 }
class ____ { private DefaultStackTraceCleaner cleaner = new DefaultStackTraceCleaner(); @Test public void allow_or_disallow_mockito_mockito_objects_in_stacktrace() throws Exception { assertAcceptedInStackTrace("my.custom.Type"); assertRejectedInStackTrace("org.mockito.foo.Bar"); assertAcceptedInStackTrace("org.mockito.internal.junit.JUnitRule"); assertAcceptedInStackTrace("org.mockito.junit.AllTypesOfThisPackage"); assertAcceptedInStackTrace("org.mockito.junit.subpackage.AllTypesOfThisPackage"); assertAcceptedInStackTrace("org.mockito.runners.AllTypesOfThisPackage"); assertAcceptedInStackTrace("org.mockito.runners.subpackage.AllTypesOfThisPackage"); assertAcceptedInStackTrace("org.mockito.internal.runners.AllTypesOfThisPackage"); assertAcceptedInStackTrace("org.mockito.internal.runners.subpackage.AllTypesOfThisPackage"); assertRejectedInStackTrace("my.custom.Type$$EnhancerByMockitoWithCGLIB$$Foo"); assertRejectedInStackTrace("my.custom.Type$MockitoMock$Foo"); } private void assertAcceptedInStackTrace(String className) { assertThat(cleaner.isIn(stackTraceElementWith(className))) .describedAs("Must be accepted in stacktrace %s", className) .isTrue(); } private void assertRejectedInStackTrace(String className) { assertThat(cleaner.isIn(stackTraceElementWith(className))) .describedAs("Must be rejected in stacktrace %s", className) .isFalse(); } private StackTraceElement stackTraceElementWith(String className) { return new StackTraceElement(className, "methodName", null, -1); } }
StackTraceCleanerTest
java
hibernate__hibernate-orm
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/query/ids/EmbIdOneToManyQuery.java
{ "start": 1112, "end": 7767 }
class ____ { private EmbId id1; private EmbId id2; private EmbId id3; private EmbId id4; @BeforeClassTemplate public void initData(EntityManagerFactoryScope scope) { id1 = new EmbId( 0, 1 ); id2 = new EmbId( 10, 11 ); id3 = new EmbId( 20, 21 ); id4 = new EmbId( 30, 31 ); // Revision 1 scope.inTransaction( entityManager -> { SetRefIngEmbIdEntity refIng1 = new SetRefIngEmbIdEntity( id1, "x", null ); SetRefIngEmbIdEntity refIng2 = new SetRefIngEmbIdEntity( id2, "y", null ); entityManager.persist( refIng1 ); entityManager.persist( refIng2 ); } ); // Revision 2 scope.inTransaction( entityManager -> { SetRefEdEmbIdEntity refEd3 = new SetRefEdEmbIdEntity( id3, "a" ); SetRefEdEmbIdEntity refEd4 = new SetRefEdEmbIdEntity( id4, "a" ); entityManager.persist( refEd3 ); entityManager.persist( refEd4 ); SetRefIngEmbIdEntity refIng1 = entityManager.find( SetRefIngEmbIdEntity.class, id1 ); SetRefIngEmbIdEntity refIng2 = entityManager.find( SetRefIngEmbIdEntity.class, id2 ); refIng1.setReference( refEd3 ); refIng2.setReference( refEd4 ); } ); // Revision 3 scope.inTransaction( entityManager -> { SetRefEdEmbIdEntity refEd3 = entityManager.find( SetRefEdEmbIdEntity.class, id3 ); SetRefIngEmbIdEntity refIng2 = entityManager.find( SetRefIngEmbIdEntity.class, id2 ); refIng2.setReference( refEd3 ); } ); } @Test public void testEntitiesReferencedToId3(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { Set rev1_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 1 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .getResultList() ); Set rev1 = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 1 ) .add( AuditEntity.property( "reference" ).eq( new SetRefEdEmbIdEntity( id3, null ) ) ) .getResultList() ); Set rev2_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 2 ) .add( 
AuditEntity.relatedId( "reference" ).eq( id3 ) ) .getResultList() ); Set rev2 = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 2 ) .add( AuditEntity.property( "reference" ).eq( new SetRefEdEmbIdEntity( id3, null ) ) ) .getResultList() ); Set rev3_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 3 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .getResultList() ); Set rev3 = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 3 ) .add( AuditEntity.property( "reference" ).eq( new SetRefEdEmbIdEntity( id3, null ) ) ) .getResultList() ); assertEquals( rev1_related, rev1 ); assertEquals( rev2_related, rev2 ); assertEquals( rev3_related, rev3 ); assertEquals( TestTools.makeSet(), rev1 ); assertEquals( TestTools.makeSet( new SetRefIngEmbIdEntity( id1, "x", null ) ), rev2 ); assertEquals( TestTools.makeSet( new SetRefIngEmbIdEntity( id1, "x", null ), new SetRefIngEmbIdEntity( id2, "y", null ) ), rev3 ); } ); } @Test public void testEntitiesReferencedToId4(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { Set rev1_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 1 ) .add( AuditEntity.relatedId( "reference" ).eq( id4 ) ) .getResultList() ); Set rev2_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 2 ) .add( AuditEntity.relatedId( "reference" ).eq( id4 ) ) .getResultList() ); Set rev3_related = new HashSet( AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 3 ) .add( AuditEntity.relatedId( "reference" ).eq( id4 ) ) .getResultList() ); assertEquals( TestTools.makeSet(), rev1_related ); assertEquals( TestTools.makeSet( new SetRefIngEmbIdEntity( id2, "y", null ) ), rev2_related ); 
assertEquals( TestTools.makeSet(), rev3_related ); } ); } @Test public void testEntitiesReferencedByIng1ToId3(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { List rev1_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 1 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id1 ) ) .getResultList(); Object rev2_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 2 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id1 ) ) .getSingleResult(); Object rev3_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 3 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id1 ) ) .getSingleResult(); assertEquals( 0, rev1_related.size() ); assertEquals( new SetRefIngEmbIdEntity( id1, "x", null ), rev2_related ); assertEquals( new SetRefIngEmbIdEntity( id1, "x", null ), rev3_related ); } ); } @Test public void testEntitiesReferencedByIng2ToId3(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { List rev1_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 1 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id2 ) ) .getResultList(); List rev2_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 2 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id2 ) ) .getResultList(); Object rev3_related = AuditReaderFactory.get( em ).createQuery() .forEntitiesAtRevision( SetRefIngEmbIdEntity.class, 3 ) .add( AuditEntity.relatedId( "reference" ).eq( id3 ) ) .add( AuditEntity.id().eq( id2 ) ) .getSingleResult(); assertEquals( 0, rev1_related.size() ); assertEquals( 0, rev2_related.size() ); assertEquals( new SetRefIngEmbIdEntity( id2, "y", null 
), rev3_related ); } ); } }
EmbIdOneToManyQuery
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/sql/results/graph/entity/internal/DiscriminatedEntityInitializer.java
{ "start": 1331, "end": 2052 }
class ____ extends AbstractInitializer<DiscriminatedEntityInitializer.DiscriminatedEntityInitializerData> implements EntityInitializer<DiscriminatedEntityInitializer.DiscriminatedEntityInitializerData> { protected final InitializerParent<?> parent; private final NavigablePath navigablePath; private final boolean isPartOfKey; private final DomainResultAssembler<?> discriminatorValueAssembler; private final DomainResultAssembler<?> keyValueAssembler; private final DiscriminatedAssociationModelPart fetchedPart; private final boolean eager; private final boolean resultInitializer; private final boolean keyIsEager; private final boolean hasLazySubInitializer; public static
DiscriminatedEntityInitializer
java
micronaut-projects__micronaut-core
http-client-core/src/main/java/io/micronaut/http/client/bind/binders/QueryValueClientArgumentRequestBinder.java
{ "start": 1633, "end": 5790 }
class ____ implements AnnotatedClientArgumentRequestBinder<QueryValue> { private final ConversionService conversionService; public QueryValueClientArgumentRequestBinder(ConversionService conversionService) { this.conversionService = conversionService; } @Override @NonNull public Class<QueryValue> getAnnotationType() { return QueryValue.class; } /** * If value can be converted to ConvertibleMultiValues, then use it and add it to the uriContext.queryParameters. * The ConvertibleMultiValues converters are found in * {@link io.micronaut.core.convert.converters.MultiValuesConverterFactory} and perform conversion only when the * {@link io.micronaut.core.convert.format.Format} annotation has one of the supported values. * Otherwise, if the {@link io.micronaut.core.convert.format.Format} annotation is present, it is converted to {@link String}. If none of these * are satisfied, the{@link io.micronaut.http.uri.UriTemplate} decides what to do with the given value which * is supplied as an Object (it is added to uriContext.pathParameter). * * <br> By default value is converted to ConvertibleMultiValues when the {@link io.micronaut.core.convert.format.Format} annotation is present and has * one of the defined above formats. Otherwise, empty optional is returned. * * <br> The default {@link io.micronaut.http.uri.UriTemplate} will convert the value to String and to parameters. * Optionally, the value can be formatted if the path template states so. 
*/ @Override public void bind( @NonNull ArgumentConversionContext<Object> context, @NonNull ClientRequestUriContext uriContext, @NonNull Object value, @NonNull MutableHttpRequest<?> request ) { String parameterName = context.getAnnotationMetadata().stringValue(QueryValue.class) .filter(StringUtils::isNotEmpty) .orElse(context.getArgument().getName()); final UriMatchVariable uriVariable = uriContext.getUriTemplate().getVariables() .stream() .filter(v -> v.getName().equals(parameterName)) .findFirst() .orElse(null); if (uriVariable != null) { if (uriVariable.isExploded()) { uriContext.setPathParameter(parameterName, value); } else { String convertedValue = conversionService.convert(value, ConversionContext.STRING.with(context.getAnnotationMetadata())) .filter(StringUtils::isNotEmpty) .orElse(null); if (convertedValue != null) { uriContext.setPathParameter(parameterName, convertedValue); } else { uriContext.setPathParameter(parameterName, value); } } } else { ArgumentConversionContext<ConvertibleMultiValues> conversionContext = context.with( Argument.of(ConvertibleMultiValues.class, context.getArgument().getName(), context.getAnnotationMetadata())); final Optional<ConvertibleMultiValues<String>> multiValues = conversionService.convert(value, conversionContext) .map(values -> (ConvertibleMultiValues<String>) values); if (multiValues.isPresent()) { Map<String, List<String>> queryParameters = uriContext.getQueryParameters(); // Add all the parameters multiValues.get().forEach((k, v) -> { if (queryParameters.containsKey(k)) { queryParameters.get(k).addAll(v); } else { queryParameters.put(k, v); } }); } else { conversionService.convert(value, ConversionContext.STRING.with(context.getAnnotationMetadata())) .ifPresent(v -> uriContext.addQueryParameter(parameterName, v)); } } } }
QueryValueClientArgumentRequestBinder
java
apache__camel
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/MultiSelectPicklistSerializer.java
{ "start": 1279, "end": 1653 }
class ____ extends StdSerializer<Object> { private static final long serialVersionUID = 3064638196900557354L; protected MultiSelectPicklistSerializer() { super(Object.class); } @Override public void serialize(Object value, JsonGenerator jgen, SerializerProvider provider) throws IOException { // get Picklist
MultiSelectPicklistSerializer
java
spring-projects__spring-security
acl/src/test/java/org/springframework/security/acls/sid/SidTests.java
{ "start": 7481, "end": 7982 }
class ____ extends AbstractAuthenticationToken { private CustomToken principal; CustomAuthenticationToken(CustomToken principal, Collection<GrantedAuthority> authorities) { super(authorities); this.principal = principal; } @Override public Object getCredentials() { return null; } @Override public CustomToken getPrincipal() { return this.principal; } @Override public String getName() { return this.principal.getName(); } } static
CustomAuthenticationToken
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateViewTest14.java
{ "start": 1062, "end": 20966 }
class ____ extends OracleTest { public void test_types() throws Exception { String sql = // "CREATE OR REPLACE FORCE VIEW \"CITSONLINE\".\"VIEW_SK_ORDER_APPLY\" (\"ORDER_ID\", \"PRODUCT_TYPE\", \"ORDER_AGENT\", \"COLLECT_MONEY\", \"CUSTOMER_ID\", \"ADD_USER\", \"ADD_DATE\", \"TEAM_NAME\", \"TEAM_NO\", \"CONTACT\", \"ORDER_ACCOUNT\") AS \n" + " select id as order_id,\n" + " 'PW' product_type,\n" + " order_owner as order_agent,\n" + " account_receivable as collect_money,\n" + " customer_group as customer_id,\n" + " operator_id as add_user,\n" + " order_date as add_date,\n" + " '' as team_name,\n" + " '' as team_no,\n" + " customer_name as contact,\n" + " (select t.name from operator t where t.account = at.ouser_id) order_account\n" + " from ticket_product_order pw_order_info, allot_track at\n" + " where at.order_id like '%' || pw_order_info.id || '%'\n" + " and at.produce_id = 'PW'\n" + " and at.end_date = to_date('1900-01-01', 'yyyy-mm-dd') --hn --766ߡh (\n" + " and pw_order_info.status = '4'\n" + "union\n" + "select insurance_order. order_id as order_id,\n" + " 'BX' product_type,\n" + " insurance_order.agent_id as order_agent,\n" + " insurance_order. all_price as collect_money,\n" + " insurance_order. custom_id as customer_id,\n" + " insurance_order. add_user,\n" + " insurance_order. add_date,\n" + " '' as team_name,\n" + " '' as team_no,\n" + " insurance_order. 
contact_name as contact,\n" + " (select t.name from operator t where t.account = at.ouser_id) order_account\n" + " from insurance_order insurance_order, allot_track at\n" + " where insurance_order.status = '3' --iU\n" + " and at.order_id_s = insurance_order.order_id\n" + " and at.produce_id = 'BX'\n" + " and at.end_date = to_date('1900-01-01', 'yyyy-mm-dd')\n" + "union\n" + "select t.order_id as order_id,\n" + " tt.product_type product_type,\n" + " tt.true_agent_id as order_agent,\n" + " t.price as collect_money,\n" + " t.custom_id as customer_id,\n" + " t.add_user,\n" + " t.add_date,\n" + " tb.name as team_name,\n" + " tb.team_no as team_no,\n" + " t.person as contact,\n" + " (select t.name from operator t where t.account = tt.od_user) order_account\n" + " from order_info t, order_flow_info tt, team_baseinfo tb\n" + " where t.product_type in ('0', '7')\n" + " and t.order_id = tt.order_id\n" + " and tt.user_type = '0'\n" + " and (tt.status = '3' or tt.status = '4' or tt.status = '5')\n" + " and tt.product_id = tb.team_id\n" + " --3n,4c,5c*6>\n" + "\n" + " union\n" + "/* hU*/\n" + "\n" + "select t.order_id as order_id,\n" + " 'GN' product_type,\n" + " t.agent_id as order_agent,\n" + " t.all_price as collect_money,\n" + " t.olduserid as customer_id,\n" + " t.add_user as add_user,\n" + " t.add_date as add_date,\n" + " '' as team_name,\n" + " '' as team_no,\n" + " t.client_name as contact,\n" + " (select o.name\n" + " from operator o, allot_track a\n" + " where o.account = a.ouser_id\n" + " and a.order_id_s = t.order_id\n" + " and a.agent_id_s = o.agent_id\n" + " and a.agent_id_s = t.agent_id\n" + " and a.end_date =\n" + " to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')) order_account\n" + " from ticket_order_info t\n" + " where t.product_type = 'GN'\n" + " and (t.order_status = '6' or t.ticket_status = '2')\n" + "\n" + "union\n" + "\n" + "/* Eh*/\n" + "select t.order_id as order_id,\n" + " 'GJ' product_type,\n" + " t.agent_id as order_agent,\n" + " t.all_price as 
collect_money,\n" + " t.olduserid as customer_id,\n" + " t.add_user as add_user,\n" + " t.add_date as add_date,\n" + " '' as team_name,\n" + " '' as team_no,\n" + " t.client_name as contact,\n" + " (select o.name\n" + " from operator o, allot_track a\n" + " where o.account = a.ouser_id\n" + " and a.order_id_s = t.order_id\n" + " and a.agent_id_s = t.agent_id\n" + " and a.agent_id_s = o.agent_id\n" + " and a.end_date =\n" + " to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')) order_account\n" + " from ticket_order_info t\n" + " where t.product_type = 'GJ'\n" + " and t.order_id like '02%'\n" + " and (t.order_status = '3' or t.ticket_status = '2')\n" + " union\n" + "--~\"5\">~,6\">ӗ\n" + "select voi.order_id as order_id,\n" + " 'QZ' product_type,\n" + " ofi.true_agent_id as order_agent,\n" + " ofi.sell_sum,\n" + " voi.custom_id as customer_id,\n" + " voi.add_user,\n" + " voi.add_date,\n" + " ofi.bal_team_id as team_name,\n" + " ofi.bal_team_no as team_no,\n" + " voi.user_name as contact,\n" + " (select op.name\n" + " from operator op, allot_track al\n" + " where op.account = al.ouser_id\n" + " and al.order_id_s = ofi.order_id\n" + " and al.supply_id_s = ofi.supply_id\n" + " and al.agent_id_s = ofi.agent_id\n" + " and al.end_date = to_timestamp('1900-01-01', 'yyyy-MM-dd')\n" + " and al.produce_Id = 'QZ') order_account\n" + " from order_flow_info ofi, visa_order_info voi\n" + " where ofi.product_type = 'QZ'\n" + " and voi.order_id = ofi.order_id\n" + " and ofi.user_type = '0'\n" + " and (ofi.status = '5' or ofi.status = '6')\n" + "\n" + "\n" + " union\n" + "--R3an,3bn*,7B2C6>n\n" + "select res.reservation_id,\n" + " 'FD' product_type,\n" + " res.agent_id,\n" + " res.total_sale_amt sale_amt,\n" + " bus.customer_id,\n" + " bus.operator,\n" + " bus.create_date,\n" + " '' as team_name,\n" + " '' as team_no,\n" + " res.contact_person as contact,\n" + " (select t.name\n" + " from operator t, allot_track a\n" + " where t.account = a.ouser_id\n" + " and bus.reservation_id = 
a.order_id\n" + " and bus.agent_id = a.agent_id\n" + " and a.end_date =\n" + " to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')) order_account\n" + " from resaccount res, businessres bus\n" + " where bus.reservation_id = res.reservation_id\n" + " and (bus.confirm_status = '3' or bus.confirm_status = '7')"; System.out.println(sql); OracleStatementParser parser = new OracleStatementParser(sql); List<SQLStatement> statementList = parser.parseStatementList(); SQLStatement stmt = statementList.get(0); print(statementList); assertEquals(1, statementList.size()); assertEquals("CREATE OR REPLACE VIEW \"CITSONLINE\".\"VIEW_SK_ORDER_APPLY\" (\n" + "\t\"ORDER_ID\", \n" + "\t\"PRODUCT_TYPE\", \n" + "\t\"ORDER_AGENT\", \n" + "\t\"COLLECT_MONEY\", \n" + "\t\"CUSTOMER_ID\", \n" + "\t\"ADD_USER\", \n" + "\t\"ADD_DATE\", \n" + "\t\"TEAM_NAME\", \n" + "\t\"TEAM_NO\", \n" + "\t\"CONTACT\", \n" + "\t\"ORDER_ACCOUNT\"\n" + ")\n" + "AS\n" + "SELECT id AS order_id, 'PW' AS product_type, order_owner AS order_agent, account_receivable AS collect_money, customer_group AS customer_id\n" + "\t, operator_id AS add_user, order_date AS add_date, NULL AS team_name, NULL AS team_no, customer_name AS contact\n" + "\t, (\n" + "\t\tSELECT t.name\n" + "\t\tFROM operator t\n" + "\t\tWHERE t.account = at.ouser_id\n" + "\t) AS order_account\n" + "FROM ticket_product_order pw_order_info, allot_track at\n" + "WHERE at.order_id LIKE '%' || pw_order_info.id || '%'\n" + "\tAND at.produce_id = 'PW'\n" + "\tAND at.end_date = to_date('1900-01-01', 'yyyy-mm-dd') -- hn --766ߡh (\n" + "\tAND pw_order_info.status = '4'\n" + "UNION\n" + "SELECT insurance_order.order_id AS order_id, 'BX' AS product_type, insurance_order.agent_id AS order_agent, insurance_order.all_price AS collect_money, insurance_order.custom_id AS customer_id\n" + "\t, insurance_order.add_user, insurance_order.add_date, NULL AS team_name, NULL AS team_no, insurance_order.contact_name AS contact\n" + "\t, (\n" + "\t\tSELECT t.name\n" + "\t\tFROM 
operator t\n" + "\t\tWHERE t.account = at.ouser_id\n" + "\t) AS order_account\n" + "FROM insurance_order insurance_order, allot_track at\n" + "WHERE insurance_order.status = '3' -- iU\n" + "\tAND at.order_id_s = insurance_order.order_id\n" + "\tAND at.produce_id = 'BX'\n" + "\tAND at.end_date = to_date('1900-01-01', 'yyyy-mm-dd')\n" + "UNION\n" + "SELECT t.order_id AS order_id, tt.product_type AS product_type, tt.true_agent_id AS order_agent, t.price AS collect_money, t.custom_id AS customer_id\n" + "\t, t.add_user, t.add_date, tb.name AS team_name, tb.team_no AS team_no, t.person AS contact\n" + "\t, (\n" + "\t\tSELECT t.name\n" + "\t\tFROM operator t\n" + "\t\tWHERE t.account = tt.od_user\n" + "\t) AS order_account\n" + "FROM order_info t, order_flow_info tt, team_baseinfo tb\n" + "WHERE t.product_type IN ('0', '7')\n" + "\tAND t.order_id = tt.order_id\n" + "\tAND tt.user_type = '0'\n" + "\tAND (tt.status = '3'\n" + "\t\tOR tt.status = '4'\n" + "\t\tOR tt.status = '5')\n" + "\tAND tt.product_id = tb.team_id -- 3n,4c,5c*6>\n" + "UNION\n" + "/* hU*/\n" + "SELECT t.order_id AS order_id, 'GN' AS product_type, t.agent_id AS order_agent, t.all_price AS collect_money, t.olduserid AS customer_id\n" + "\t, t.add_user AS add_user, t.add_date AS add_date, NULL AS team_name, NULL AS team_no, t.client_name AS contact\n" + "\t, (\n" + "\t\tSELECT o.name\n" + "\t\tFROM operator o, allot_track a\n" + "\t\tWHERE o.account = a.ouser_id\n" + "\t\t\tAND a.order_id_s = t.order_id\n" + "\t\t\tAND a.agent_id_s = o.agent_id\n" + "\t\t\tAND a.agent_id_s = t.agent_id\n" + "\t\t\tAND a.end_date = to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')\n" + "\t) AS order_account\n" + "FROM ticket_order_info t\n" + "WHERE t.product_type = 'GN'\n" + "\tAND (t.order_status = '6'\n" + "\t\tOR t.ticket_status = '2')\n" + "UNION\n" + "/* Eh*/\n" + "SELECT t.order_id AS order_id, 'GJ' AS product_type, t.agent_id AS order_agent, t.all_price AS collect_money, t.olduserid AS customer_id\n" + "\t, 
t.add_user AS add_user, t.add_date AS add_date, NULL AS team_name, NULL AS team_no, t.client_name AS contact\n" + "\t, (\n" + "\t\tSELECT o.name\n" + "\t\tFROM operator o, allot_track a\n" + "\t\tWHERE o.account = a.ouser_id\n" + "\t\t\tAND a.order_id_s = t.order_id\n" + "\t\t\tAND a.agent_id_s = t.agent_id\n" + "\t\t\tAND a.agent_id_s = o.agent_id\n" + "\t\t\tAND a.end_date = to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')\n" + "\t) AS order_account\n" + "FROM ticket_order_info t\n" + "WHERE t.product_type = 'GJ'\n" + "\tAND t.order_id LIKE '02%'\n" + "\tAND (t.order_status = '3'\n" + "\t\tOR t.ticket_status = '2')\n" + "UNION\n" + "-- ~\"5\">~,6\">ӗ\n" + "SELECT voi.order_id AS order_id, 'QZ' AS product_type, ofi.true_agent_id AS order_agent, ofi.sell_sum, voi.custom_id AS customer_id\n" + "\t, voi.add_user, voi.add_date, ofi.bal_team_id AS team_name, ofi.bal_team_no AS team_no, voi.user_name AS contact\n" + "\t, (\n" + "\t\tSELECT op.name\n" + "\t\tFROM operator op, allot_track al\n" + "\t\tWHERE op.account = al.ouser_id\n" + "\t\t\tAND al.order_id_s = ofi.order_id\n" + "\t\t\tAND al.supply_id_s = ofi.supply_id\n" + "\t\t\tAND al.agent_id_s = ofi.agent_id\n" + "\t\t\tAND al.end_date = to_timestamp('1900-01-01', 'yyyy-MM-dd')\n" + "\t\t\tAND al.produce_Id = 'QZ'\n" + "\t) AS order_account\n" + "FROM order_flow_info ofi, visa_order_info voi\n" + "WHERE ofi.product_type = 'QZ'\n" + "\tAND voi.order_id = ofi.order_id\n" + "\tAND ofi.user_type = '0'\n" + "\tAND (ofi.status = '5'\n" + "\t\tOR ofi.status = '6')\n" + "UNION\n" + "-- R3an,3bn*,7B2C6>n\n" + "SELECT res.reservation_id, 'FD' AS product_type, res.agent_id, res.total_sale_amt AS sale_amt, bus.customer_id\n" + "\t, bus.operator, bus.create_date, NULL AS team_name, NULL AS team_no, res.contact_person AS contact\n" + "\t, (\n" + "\t\tSELECT t.name\n" + "\t\tFROM operator t, allot_track a\n" + "\t\tWHERE t.account = a.ouser_id\n" + "\t\t\tAND bus.reservation_id = a.order_id\n" + "\t\t\tAND bus.agent_id = 
a.agent_id\n" + "\t\t\tAND a.end_date = to_timestamp('1900-1-1', 'yyyy-mm-dd hh24:mi:ssxff')\n" + "\t) AS order_account\n" + "FROM resaccount res, businessres bus\n" + "WHERE bus.reservation_id = res.reservation_id\n" + "\tAND (bus.confirm_status = '3'\n" + "\t\tOR bus.confirm_status = '7')", SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE)); OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor(); stmt.accept(visitor); System.out.println("Tables : " + visitor.getTables()); System.out.println("fields : " + visitor.getColumns()); System.out.println("coditions : " + visitor.getConditions()); System.out.println("relationships : " + visitor.getRelationships()); System.out.println("orderBy : " + visitor.getOrderByColumns()); assertEquals(11, visitor.getTables().size()); assertEquals(75, visitor.getColumns().size()); assertTrue(visitor.getColumns().contains(new TableStat.Column("ticket_product_order", "status"))); } }
OracleCreateViewTest14
java
micronaut-projects__micronaut-core
management/src/main/java/io/micronaut/management/endpoint/health/HealthLevelOfDetail.java
{ "start": 667, "end": 844 }
class ____ a set of common constants to indicate the level of detail to be included in the Health status response.</p> * * @author Sergio del Amo * @since 1.0 */ public
provides
java
apache__flink
flink-tests/src/test/java/org/apache/flink/test/state/StateHandleSerializationTest.java
{ "start": 1244, "end": 1862 }
class ____ { @Test public void ensureStateHandlesHaveSerialVersionUID() throws Exception { Reflections reflections = new Reflections("org.apache.flink"); // check all state handles @SuppressWarnings("unchecked") Set<Class<?>> stateHandleImplementations = (Set<Class<?>>) (Set<?>) reflections.getSubTypesOf(StateObject.class); for (Class<?> clazz : stateHandleImplementations) { validataSerialVersionUID(clazz); } } private static void validataSerialVersionUID(Class<?> clazz) { // all non-
StateHandleSerializationTest
java
grpc__grpc-java
xds/src/main/java/io/grpc/xds/internal/security/SecurityProtocolNegotiators.java
{ "start": 11431, "end": 13612 }
class ____ extends ChannelInboundHandlerAdapter { private final GrpcHttp2ConnectionHandler grpcHandler; @Nullable private final ProtocolNegotiator fallbackProtocolNegotiator; HandlerPickerHandler( GrpcHttp2ConnectionHandler grpcHandler, @Nullable ProtocolNegotiator fallbackProtocolNegotiator) { this.grpcHandler = checkNotNull(grpcHandler, "grpcHandler"); this.fallbackProtocolNegotiator = fallbackProtocolNegotiator; } @Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { if (evt instanceof ProtocolNegotiationEvent) { ProtocolNegotiationEvent pne = (ProtocolNegotiationEvent)evt; SslContextProviderSupplier sslContextProviderSupplier = InternalProtocolNegotiationEvent .getAttributes(pne).get(ATTR_SERVER_SSL_CONTEXT_PROVIDER_SUPPLIER); if (sslContextProviderSupplier == null) { logger.log(Level.FINE, "No sslContextProviderSupplier found in filterChainMatch " + "for connection from {0} to {1}", new Object[]{ctx.channel().remoteAddress(), ctx.channel().localAddress()}); if (fallbackProtocolNegotiator == null) { ctx.fireExceptionCaught(new CertStoreException("No certificate source found!")); return; } logger.log(Level.FINE, "Using fallback credentials for connection from {0} to {1}", new Object[]{ctx.channel().remoteAddress(), ctx.channel().localAddress()}); ctx.pipeline() .replace( this, null, fallbackProtocolNegotiator.newHandler(grpcHandler)); ctx.fireUserEventTriggered(pne); return; } else { ctx.pipeline() .replace( this, null, new ServerSecurityHandler( grpcHandler, sslContextProviderSupplier)); ctx.fireUserEventTriggered(pne); return; } } else { super.userEventTriggered(ctx, evt); } } } @VisibleForTesting static final
HandlerPickerHandler
java
dropwizard__dropwizard
dropwizard-jersey/src/main/java/io/dropwizard/jersey/params/AbstractParam.java
{ "start": 373, "end": 505 }
class ____ which to build Jersey parameter classes. * * @param <T> the type of value wrapped by the parameter */ public abstract
from
java
apache__kafka
server-common/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateParameters.java
{ "start": 1079, "end": 2901 }
class ____ implements PersisterParameters { private final GroupTopicPartitionData<PartitionStateBatchData> groupTopicPartitionData; private WriteShareGroupStateParameters(GroupTopicPartitionData<PartitionStateBatchData> groupTopicPartitionData) { this.groupTopicPartitionData = groupTopicPartitionData; } public GroupTopicPartitionData<PartitionStateBatchData> groupTopicPartitionData() { return groupTopicPartitionData; } public static WriteShareGroupStateParameters from(WriteShareGroupStateRequestData data) { return new Builder() .setGroupTopicPartitionData(new GroupTopicPartitionData<>(data.groupId(), data.topics().stream() .map(writeStateData -> new TopicData<>(writeStateData.topicId(), writeStateData.partitions().stream() .map(partitionData -> PartitionFactory.newPartitionStateBatchData( partitionData.partition(), partitionData.stateEpoch(), partitionData.startOffset(), partitionData.deliveryCompleteCount(), partitionData.leaderEpoch(), partitionData.stateBatches().stream() .map(PersisterStateBatch::from) .collect(Collectors.toList()))) .collect(Collectors.toList()))) .collect(Collectors.toList()))) .build(); } public static
WriteShareGroupStateParameters
java
square__moshi
moshi/src/test/java/com/squareup/moshi/internal/ClassJsonAdapterTest.java
{ "start": 6768, "end": 6826 }
class ____ { transient int a; } static
TransientBaseA
java
elastic__elasticsearch
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
{ "start": 717, "end": 3244 }
class ____ extends ANode { private final String returnCanonicalTypeName; private final String functionName; private final List<String> canonicalTypeNameParameters; private final List<String> parameterNames; private final SBlock blockNode; private final boolean isInternal; private final boolean isStatic; private final boolean isSynthetic; private final boolean isAutoReturnEnabled; public SFunction( int identifier, Location location, String returnCanonicalTypeName, String name, List<String> canonicalTypeNameParameters, List<String> parameterNames, SBlock blockNode, boolean isInternal, boolean isStatic, boolean isSynthetic, boolean isAutoReturnEnabled ) { super(identifier, location); this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); this.functionName = Objects.requireNonNull(name); this.canonicalTypeNameParameters = List.copyOf(canonicalTypeNameParameters); this.parameterNames = List.copyOf(parameterNames); this.blockNode = Objects.requireNonNull(blockNode); this.isInternal = isInternal; this.isSynthetic = isSynthetic; this.isStatic = isStatic; this.isAutoReturnEnabled = isAutoReturnEnabled; } public String getReturnCanonicalTypeName() { return returnCanonicalTypeName; } public String getFunctionName() { return functionName; } public List<String> getCanonicalTypeNameParameters() { return canonicalTypeNameParameters; } public List<String> getParameterNames() { return parameterNames; } public SBlock getBlockNode() { return blockNode; } public boolean isInternal() { return isInternal; } public boolean isStatic() { return isStatic; } public boolean isSynthetic() { return isSynthetic; } /** * If set to {@code true} default return values are inserted if * not all paths return a value. 
*/ public boolean isAutoReturnEnabled() { return isAutoReturnEnabled; } @Override public <Scope> void visit(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) { userTreeVisitor.visitFunction(this, scope); } @Override public <Scope> void visitChildren(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) { blockNode.visit(userTreeVisitor, scope); } }
SFunction
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/OpportunisticSchedulerMetrics.java
{ "start": 1680, "end": 5521 }
class ____ { // CHECKSTYLE:OFF:VisibilityModifier private static AtomicBoolean isInitialized = new AtomicBoolean(false); private static final MetricsInfo RECORD_INFO = info("OpportunisticSchedulerMetrics", "Metrics for the Yarn Opportunistic Scheduler"); private static volatile OpportunisticSchedulerMetrics INSTANCE = null; private static MetricsRegistry registry; public static OpportunisticSchedulerMetrics getMetrics() { if(!isInitialized.get()){ synchronized (OpportunisticSchedulerMetrics.class) { if(INSTANCE == null){ INSTANCE = new OpportunisticSchedulerMetrics(); registerMetrics(); isInitialized.set(true); } } } return INSTANCE; } @VisibleForTesting public static void resetMetrics() { synchronized (OpportunisticSchedulerMetrics.class) { isInitialized.set(false); INSTANCE = null; MetricsSystem ms = DefaultMetricsSystem.instance(); if (ms != null) { ms.unregisterSource("OpportunisticSchedulerMetrics"); } } } private static void registerMetrics() { registry = new MetricsRegistry(RECORD_INFO); registry.tag(RECORD_INFO, "ResourceManager"); MetricsSystem ms = DefaultMetricsSystem.instance(); if (ms != null) { ms.register("OpportunisticSchedulerMetrics", "Metrics for the Yarn Opportunistic Scheduler", INSTANCE); } } @Metric("# of allocated opportunistic containers") MutableGaugeInt allocatedOContainers; @Metric("Aggregate # of allocated opportunistic containers") MutableCounterLong aggregateOContainersAllocated; @Metric("Aggregate # of released opportunistic containers") MutableCounterLong aggregateOContainersReleased; @Metric("Aggregate # of allocated node-local opportunistic containers") MutableCounterLong aggregateNodeLocalOContainersAllocated; @Metric("Aggregate # of allocated rack-local opportunistic containers") MutableCounterLong aggregateRackLocalOContainersAllocated; @Metric("Aggregate # of allocated off-switch opportunistic containers") MutableCounterLong aggregateOffSwitchOContainersAllocated; @Metric("Aggregate latency for opportunistic container 
allocation") MutableQuantiles allocateLatencyOQuantiles; @VisibleForTesting public int getAllocatedContainers() { return allocatedOContainers.value(); } @VisibleForTesting public long getAggregatedAllocatedContainers() { return aggregateOContainersAllocated.value(); } @VisibleForTesting public long getAggregatedReleasedContainers() { return aggregateOContainersReleased.value(); } @VisibleForTesting public long getAggregatedNodeLocalContainers() { return aggregateNodeLocalOContainersAllocated.value(); } @VisibleForTesting public long getAggregatedRackLocalContainers() { return aggregateRackLocalOContainersAllocated.value(); } @VisibleForTesting public long getAggregatedOffSwitchContainers() { return aggregateOffSwitchOContainersAllocated.value(); } // Opportunistic Containers public void incrAllocatedOppContainers(int numContainers) { allocatedOContainers.incr(numContainers); aggregateOContainersAllocated.incr(numContainers); } public void incrReleasedOppContainers(int numContainers) { aggregateOContainersReleased.incr(numContainers); allocatedOContainers.decr(numContainers); } public void incrNodeLocalOppContainers() { aggregateNodeLocalOContainersAllocated.incr(); } public void incrRackLocalOppContainers() { aggregateRackLocalOContainersAllocated.incr(); } public void incrOffSwitchOppContainers() { aggregateOffSwitchOContainersAllocated.incr(); } public void addAllocateOLatencyEntry(long latency) { allocateLatencyOQuantiles.add(latency); } }
OpportunisticSchedulerMetrics
java
elastic__elasticsearch
build-tools/src/main/java/org/elasticsearch/gradle/PropertyNormalization.java
{ "start": 510, "end": 746 }
enum ____ { /** * Uses default strategy based on runtime property type. */ DEFAULT, /** * Ignores property value completely for the purposes of input snapshotting. */ IGNORE_VALUE }
PropertyNormalization
java
spring-projects__spring-boot
core/spring-boot/src/test/java/org/springframework/boot/context/properties/source/ConfigurationPropertyNameAliasesTests.java
{ "start": 979, "end": 4556 }
class ____ { @Test @SuppressWarnings("NullAway") // Test null check void createWithStringWhenNullNameShouldThrowException() { assertThatIllegalArgumentException().isThrownBy(() -> new ConfigurationPropertyNameAliases((String) null)) .withMessageContaining("'name' must not be null"); } @Test void createWithStringShouldAddMapping() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases("foo", "bar", "baz"); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); } @Test void createWithNameShouldAddMapping() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases( ConfigurationPropertyName.of("foo"), ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); } @Test void addAliasesFromStringShouldAddMapping() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases("foo", "bar", "baz"); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); } @Test void addAliasesFromNameShouldAddMapping() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases(ConfigurationPropertyName.of("foo"), ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); } @Test void addWhenHasExistingShouldAddAdditionalMappings() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases("foo", "bar"); aliases.addAliases("foo", "baz"); 
assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar"), ConfigurationPropertyName.of("baz")); } @Test void getAliasesWhenNotMappedShouldReturnEmptyList() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))).isEmpty(); } @Test void getAliasesWhenMappedShouldReturnMapping() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases("foo", "bar"); assertThat(aliases.getAliases(ConfigurationPropertyName.of("foo"))) .containsExactly(ConfigurationPropertyName.of("bar")); } @Test void getNameForAliasWhenHasMappingShouldReturnName() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases("foo", "bar"); aliases.addAliases("foo", "baz"); assertThat((Object) aliases.getNameForAlias(ConfigurationPropertyName.of("bar"))) .isEqualTo(ConfigurationPropertyName.of("foo")); assertThat((Object) aliases.getNameForAlias(ConfigurationPropertyName.of("baz"))) .isEqualTo(ConfigurationPropertyName.of("foo")); } @Test void getNameForAliasWhenNotMappedShouldReturnNull() { ConfigurationPropertyNameAliases aliases = new ConfigurationPropertyNameAliases(); aliases.addAliases("foo", "bar"); assertThat((Object) aliases.getNameForAlias(ConfigurationPropertyName.of("baz"))).isNull(); } }
ConfigurationPropertyNameAliasesTests
java
square__retrofit
retrofit-adapters/rxjava/src/test/java/retrofit2/adapter/rxjava/ObservableThrowingTest.java
{ "start": 1539, "end": 10910 }
/*
 * NOTE(review): interior of an RxJava 1.x CallAdapter test class (header not visible in
 * this chunk). It declares a nested Retrofit `Service` interface exposing the same GET
 * endpoint as a body, Response, or Result Observable, wires it up in setUp(), and then
 * verifies, for each of the three stream shapes, where exceptions thrown from subscriber
 * callbacks end up: onNext failures are delivered to onError, while onCompleted/onError
 * failures are routed to the RxJavaPlugins error handler (as a CompositeException when a
 * prior error is involved). The handlers use compareAndSet on an AtomicReference so a
 * second, unexpected error is re-thrown instead of silently overwriting the first.
 * Line breaks in this chunk were collapsed by extraction; code is otherwise unchanged.
 */
interface ____ { @GET("/") Observable<String> body(); @GET("/") Observable<Response<String>> response(); @GET("/") Observable<Result<String>> result(); } private Service service; @Before public void setUp() { Retrofit retrofit = new Retrofit.Builder() .baseUrl(server.url("/")) .addConverterFactory(new StringConverterFactory()) .addCallAdapterFactory(RxJavaCallAdapterFactory.create()) .build(); service = retrofit.create(Service.class); } @Test public void bodyThrowingInOnNextDeliveredToError() { server.enqueue(new MockResponse()); RecordingSubscriber<String> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .body() .unsafeSubscribe( new ForwardingSubscriber<String>(observer) { @Override public void onNext(String value) { throw e; } }); observer.assertError(e); } @Test public void bodyThrowingInOnCompleteDeliveredToPlugin() { server.enqueue(new MockResponse()); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
} } }); RecordingSubscriber<String> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .body() .unsafeSubscribe( new ForwardingSubscriber<String>(observer) { @Override public void onCompleted() { throw e; } }); observer.assertAnyValue(); assertThat(pluginRef.get()).isSameInstanceAs(e); } @Test public void bodyThrowingInOnErrorDeliveredToPlugin() { server.enqueue(new MockResponse().setResponseCode(404)); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors! } } }); RecordingSubscriber<String> observer = subscriberRule.create(); final AtomicReference<Throwable> errorRef = new AtomicReference<>(); final RuntimeException e = new RuntimeException(); service .body() .unsafeSubscribe( new ForwardingSubscriber<String>(observer) { @Override public void onError(Throwable throwable) { if (!errorRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); } throw e; } }); CompositeException composite = (CompositeException) pluginRef.get(); assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e); } @Test public void responseThrowingInOnNextDeliveredToError() { server.enqueue(new MockResponse()); RecordingSubscriber<Response<String>> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .response() .unsafeSubscribe( new ForwardingSubscriber<Response<String>>(observer) { @Override public void onNext(Response<String> value) { throw e; } }); observer.assertError(e); } @Test public void responseThrowingInOnCompleteDeliveredToPlugin() { server.enqueue(new MockResponse()); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new
RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors! } } }); RecordingSubscriber<Response<String>> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .response() .unsafeSubscribe( new ForwardingSubscriber<Response<String>>(observer) { @Override public void onCompleted() { throw e; } }); observer.assertAnyValue(); assertThat(pluginRef.get()).isSameInstanceAs(e); } @Test public void responseThrowingInOnErrorDeliveredToPlugin() { server.enqueue(new MockResponse().setSocketPolicy(DISCONNECT_AFTER_REQUEST)); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
} } }); RecordingSubscriber<Response<String>> observer = subscriberRule.create(); final AtomicReference<Throwable> errorRef = new AtomicReference<>(); final RuntimeException e = new RuntimeException(); service .response() .unsafeSubscribe( new ForwardingSubscriber<Response<String>>(observer) { @Override public void onError(Throwable throwable) { if (!errorRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); } throw e; } }); CompositeException composite = (CompositeException) pluginRef.get(); assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e); } @Test public void resultThrowingInOnNextDeliveredToError() { server.enqueue(new MockResponse()); RecordingSubscriber<Result<String>> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .result() .unsafeSubscribe( new ForwardingSubscriber<Result<String>>(observer) { @Override public void onNext(Result<String> value) { throw e; } }); observer.assertError(e); } @Test public void resultThrowingInOnCompletedDeliveredToPlugin() { server.enqueue(new MockResponse()); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors!
} } }); RecordingSubscriber<Result<String>> observer = subscriberRule.create(); final RuntimeException e = new RuntimeException(); service .result() .unsafeSubscribe( new ForwardingSubscriber<Result<String>>(observer) { @Override public void onCompleted() { throw e; } }); observer.assertAnyValue(); assertThat(pluginRef.get()).isSameInstanceAs(e); } @Test public void resultThrowingInOnErrorDeliveredToPlugin() { server.enqueue(new MockResponse()); final AtomicReference<Throwable> pluginRef = new AtomicReference<>(); RxJavaPlugins.getInstance() .registerErrorHandler( new RxJavaErrorHandler() { @Override public void handleError(Throwable throwable) { if (!pluginRef.compareAndSet(null, throwable)) { throw Exceptions.propagate(throwable); // Don't swallow secondary errors! } } }); RecordingSubscriber<Result<String>> observer = subscriberRule.create(); final RuntimeException first = new RuntimeException(); final RuntimeException second = new RuntimeException(); service .result() .unsafeSubscribe( new ForwardingSubscriber<Result<String>>(observer) { @Override public void onNext(Result<String> value) { // The only way to trigger onError for a result is if onNext throws. throw first; } @Override public void onError(Throwable throwable) { throw second; } }); CompositeException composite = (CompositeException) pluginRef.get(); assertThat(composite.getExceptions()).containsExactly(first, second); } }
Service
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
{ "start": 23386, "end": 109418 }
interface ____ NameNode // is listening on. In this case, it is localhost. String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file"; Path path = new Path(uri); FileSystem fs = FileSystem.get(path.toUri(), conf); FSDataOutputStream out = fs.create(path); byte[] buf = new byte[1024]; out.write(buf); out.close(); FSDataInputStream in = fs.open(path); in.readFully(buf); in.close(); fs.close(); } { // Test PathIsNotEmptyDirectoryException while deleting non-empty dir FileSystem fs = cluster.getFileSystem(); fs.mkdirs(new Path("/test/nonEmptyDir")); fs.create(new Path("/tmp/nonEmptyDir/emptyFile")).close(); try { fs.delete(new Path("/tmp/nonEmptyDir"), false); fail("Expecting PathIsNotEmptyDirectoryException"); } catch (PathIsNotEmptyDirectoryException ex) { // This is the proper exception to catch; move on. } assertTrue(fs.exists(new Path("/test/nonEmptyDir"))); fs.delete(new Path("/tmp/nonEmptyDir"), true); } } finally { if (cluster != null) {cluster.shutdown();} } } /** * This is to test that the {@link FileSystem#clearStatistics()} resets all * the global storage statistics. 
*/ @Test public void testClearStatistics() throws Exception { final Configuration conf = getTestConfiguration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); FileSystem dfs = cluster.getFileSystem(); final Path dir = new Path("/testClearStatistics"); final long mkdirCount = getOpStatistics(OpType.MKDIRS); long writeCount = DFSTestUtil.getStatistics(dfs).getWriteOps(); dfs.mkdirs(dir); checkOpStatistics(OpType.MKDIRS, mkdirCount + 1); assertEquals(++writeCount, DFSTestUtil.getStatistics(dfs).getWriteOps()); final long createCount = getOpStatistics(OpType.CREATE); FSDataOutputStream out = dfs.create(new Path(dir, "tmpFile"), (short)1); out.write(40); out.close(); checkOpStatistics(OpType.CREATE, createCount + 1); assertEquals(++writeCount, DFSTestUtil.getStatistics(dfs).getWriteOps()); FileSystem.clearStatistics(); checkOpStatistics(OpType.MKDIRS, 0); checkOpStatistics(OpType.CREATE, 0); checkStatistics(dfs, 0, 0, 0); } finally { cluster.shutdown(); } } /** * This is to test that {@link DFSConfigKeys#DFS_LIST_LIMIT} works as * expected when {@link DistributedFileSystem#listLocatedStatus} is called. 
*/ @Test public void testGetListingLimit() throws Exception { final Configuration conf = getTestConfiguration(); conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build()) { cluster.waitActive(); ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); final DistributedFileSystem fs = cluster.getFileSystem(); fs.dfs = spy(fs.dfs); Path dir1 = new Path("/testRep"); Path dir2 = new Path("/testEC"); fs.mkdirs(dir1); fs.mkdirs(dir2); fs.setErasureCodingPolicy(dir2, ecPolicy.getName()); for (int i = 0; i < 3; i++) { DFSTestUtil.createFile(fs, new Path(dir1, String.valueOf(i)), 20 * 1024L, (short) 3, 1); DFSTestUtil.createStripedFile(cluster, new Path(dir2, String.valueOf(i)), dir2, 1, 1, false); } List<LocatedFileStatus> str = RemoteIterators.toList(fs.listLocatedStatus(dir1)); assertThat(str).hasSize(3); Mockito.verify(fs.dfs, Mockito.times(1)).listPaths(anyString(), any(), anyBoolean()); str = RemoteIterators.toList(fs.listLocatedStatus(dir2)); assertThat(str).hasSize(3); Mockito.verify(fs.dfs, Mockito.times(4)).listPaths(anyString(), any(), anyBoolean()); } } @Test public void testStatistics() throws IOException { FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class).reset(); @SuppressWarnings("unchecked") ThreadLocal<StatisticsData> data = (ThreadLocal<StatisticsData>) Whitebox.getInternalState( FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class), "threadData"); data.set(null); int lsLimit = 2; final Configuration conf = getTestConfiguration(); conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); final FileSystem fs = cluster.getFileSystem(); Path dir = new Path("/test"); Path file = new Path(dir, "file"); int readOps = 0; int writeOps = 0; int largeReadOps = 0; long opCount = getOpStatistics(OpType.MKDIRS); 
fs.mkdirs(dir); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.MKDIRS, opCount + 1); opCount = getOpStatistics(OpType.CREATE); FSDataOutputStream out = fs.create(file, (short)1); out.close(); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.CREATE, opCount + 1); opCount = getOpStatistics(OpType.GET_FILE_STATUS); FileStatus status = fs.getFileStatus(file); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_STATUS, opCount + 1); opCount = getOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS); fs.getFileBlockLocations(file, 0, 0); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 1); fs.getFileBlockLocations(status, 0, 0); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 2); opCount = getOpStatistics(OpType.OPEN); FSDataInputStream in = fs.open(file); in.close(); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.OPEN, opCount + 1); opCount = getOpStatistics(OpType.SET_REPLICATION); fs.setReplication(file, (short)2); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.SET_REPLICATION, opCount + 1); opCount = getOpStatistics(OpType.RENAME); Path file1 = new Path(dir, "file1"); fs.rename(file, file1); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.RENAME, opCount + 1); opCount = getOpStatistics(OpType.GET_CONTENT_SUMMARY); fs.getContentSummary(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_CONTENT_SUMMARY, opCount + 1); // Iterative ls test long mkdirOp = getOpStatistics(OpType.MKDIRS); long listStatusOp = getOpStatistics(OpType.LIST_STATUS); long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS); for (int i = 0; i < 10; i++) { Path p = new Path(dir, Integer.toString(i)); fs.mkdirs(p); 
mkdirOp++; FileStatus[] list = fs.listStatus(dir); if (list.length > lsLimit) { // if large directory, then count readOps and largeReadOps by // number times listStatus iterates int iterations = (int)Math.ceil((double)list.length/lsLimit); largeReadOps += iterations; readOps += iterations; listStatusOp += iterations; } else { // Single iteration in listStatus - no large read operation done readOps++; listStatusOp++; } // writeOps incremented by 1 for mkdirs // readOps and largeReadOps incremented by 1 or more checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.MKDIRS, mkdirOp); checkOpStatistics(OpType.LIST_STATUS, listStatusOp); fs.listLocatedStatus(dir); locatedListStatusOP++; readOps++; checkStatistics(fs, readOps, writeOps, largeReadOps); checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP); } opCount = getOpStatistics(OpType.GET_STATUS); fs.getStatus(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_STATUS, opCount + 1); opCount = getOpStatistics(OpType.GET_FILE_CHECKSUM); fs.getFileChecksum(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_CHECKSUM, opCount + 1); opCount = getOpStatistics(OpType.SET_PERMISSION); fs.setPermission(file1, new FsPermission((short)0777)); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.SET_PERMISSION, opCount + 1); opCount = getOpStatistics(OpType.SET_TIMES); fs.setTimes(file1, 0L, 0L); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.SET_TIMES, opCount + 1); opCount = getOpStatistics(OpType.SET_OWNER); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]); checkOpStatistics(OpType.SET_OWNER, opCount + 1); checkStatistics(fs, readOps, ++writeOps, largeReadOps); opCount = getOpStatistics(OpType.DELETE); fs.delete(dir, true); checkStatistics(fs, readOps, 
++writeOps, largeReadOps); checkOpStatistics(OpType.DELETE, opCount + 1); } finally { if (cluster != null) cluster.shutdown(); } } @Test public void testStatistics2() throws IOException, NoSuchAlgorithmException { HdfsConfiguration conf = getTestConfiguration(); conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL.toString()); File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); Path dir = new Path("/testStat"); dfs.mkdirs(dir); int readOps = 0; int writeOps = 0; FileSystem.clearStatistics(); // Quota Commands. long opCount = getOpStatistics(OpType.SET_QUOTA_USAGE); dfs.setQuota(dir, 100, 1000); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.SET_QUOTA_USAGE, opCount + 1); opCount = getOpStatistics(OpType.SET_QUOTA_BYTSTORAGEYPE); dfs.setQuotaByStorageType(dir, StorageType.DEFAULT, 2000); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.SET_QUOTA_BYTSTORAGEYPE, opCount + 1); opCount = getOpStatistics(OpType.GET_QUOTA_USAGE); dfs.getQuotaUsage(dir); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_QUOTA_USAGE, opCount + 1); // Satisfy Storage Policy. opCount = getOpStatistics(OpType.SATISFY_STORAGE_POLICY); dfs.satisfyStoragePolicy(dir); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.SATISFY_STORAGE_POLICY, opCount + 1); // Cache Commands. 
CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)); opCount = getOpStatistics(OpType.ADD_CACHE_POOL); dfs.addCachePool(cacheInfo); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.ADD_CACHE_POOL, opCount + 1); CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder() .setPath(new Path(".")).setPool("pool1").build(); opCount = getOpStatistics(OpType.ADD_CACHE_DIRECTIVE); long id = dfs.addCacheDirective(directive); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.ADD_CACHE_DIRECTIVE, opCount + 1); opCount = getOpStatistics(OpType.LIST_CACHE_DIRECTIVE); dfs.listCacheDirectives(null); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.LIST_CACHE_DIRECTIVE, opCount + 1); opCount = getOpStatistics(OpType.MODIFY_CACHE_DIRECTIVE); dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id) .setReplication((short) 2).build()); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.MODIFY_CACHE_DIRECTIVE, opCount + 1); opCount = getOpStatistics(OpType.REMOVE_CACHE_DIRECTIVE); dfs.removeCacheDirective(id); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.REMOVE_CACHE_DIRECTIVE, opCount + 1); opCount = getOpStatistics(OpType.MODIFY_CACHE_POOL); dfs.modifyCachePool(cacheInfo); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.MODIFY_CACHE_POOL, opCount + 1); opCount = getOpStatistics(OpType.LIST_CACHE_POOL); dfs.listCachePools(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.LIST_CACHE_POOL, opCount + 1); opCount = getOpStatistics(OpType.REMOVE_CACHE_POOL); dfs.removeCachePool(cacheInfo.getPoolName()); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.REMOVE_CACHE_POOL, opCount + 1); // Crypto Commands. 
final KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); provider.createKey("key", options); provider.flush(); opCount = getOpStatistics(OpType.CREATE_ENCRYPTION_ZONE); dfs.createEncryptionZone(dir, "key"); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.CREATE_ENCRYPTION_ZONE, opCount + 1); opCount = getOpStatistics(OpType.LIST_ENCRYPTION_ZONE); dfs.listEncryptionZones(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.LIST_ENCRYPTION_ZONE, opCount + 1); opCount = getOpStatistics(OpType.GET_ENCRYPTION_ZONE); dfs.getEZForPath(dir); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1); opCount = getOpStatistics(OpType.GET_ENCLOSING_ROOT); dfs.getEnclosingRoot(dir); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_ENCLOSING_ROOT, opCount + 1); opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST); dfs.getSnapshottableDirListing(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST, opCount + 1); opCount = getOpStatistics(OpType.GET_STORAGE_POLICIES); dfs.getAllStoragePolicies(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_STORAGE_POLICIES, opCount + 1); opCount = getOpStatistics(OpType.GET_TRASH_ROOT); dfs.getTrashRoot(dir); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_TRASH_ROOT, opCount + 1); } } @Test public void testECStatistics() throws IOException { try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(getTestConfiguration()).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); Path dir = new Path("/test"); dfs.mkdirs(dir); int readOps = 0; int writeOps = 0; FileSystem.clearStatistics(); long opCount = getOpStatistics(OpType.ENABLE_EC_POLICY); 
dfs.enableErasureCodingPolicy("RS-10-4-1024k"); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.ENABLE_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.SET_EC_POLICY); dfs.setErasureCodingPolicy(dir, "RS-10-4-1024k"); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.SET_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.GET_EC_POLICY); dfs.getErasureCodingPolicy(dir); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.UNSET_EC_POLICY); dfs.unsetErasureCodingPolicy(dir); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.UNSET_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.GET_EC_POLICIES); dfs.getAllErasureCodingPolicies(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_EC_POLICIES, opCount + 1); opCount = getOpStatistics(OpType.GET_EC_CODECS); dfs.getAllErasureCodingCodecs(); checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_EC_CODECS, opCount + 1); ErasureCodingPolicy newPolicy = new ErasureCodingPolicy(new ECSchema("rs", 5, 3), 1024 * 1024); opCount = getOpStatistics(OpType.ADD_EC_POLICY); dfs.addErasureCodingPolicies(new ErasureCodingPolicy[] {newPolicy}); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.ADD_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.REMOVE_EC_POLICY); dfs.removeErasureCodingPolicy("RS-5-3-1024k"); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.REMOVE_EC_POLICY, opCount + 1); opCount = getOpStatistics(OpType.DISABLE_EC_POLICY); dfs.disableErasureCodingPolicy("RS-10-4-1024k"); checkStatistics(dfs, readOps, ++writeOps, 0); checkOpStatistics(OpType.DISABLE_EC_POLICY, opCount + 1); } } @SuppressWarnings("ThrowableResultOfMethodCallIgnored") @Test @Timeout(value = 180) public void testConcurrentStatistics() throws IOException, InterruptedException { 
FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class).reset(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder( new Configuration()).build(); cluster.waitActive(); final FileSystem fs = cluster.getFileSystem(); final int numThreads = 5; final ExecutorService threadPool = HadoopExecutors.newFixedThreadPool(numThreads); try { final CountDownLatch allExecutorThreadsReady = new CountDownLatch(numThreads); final CountDownLatch startBlocker = new CountDownLatch(1); final CountDownLatch allDone = new CountDownLatch(numThreads); final AtomicReference<Throwable> childError = new AtomicReference<>(); for (int i = 0; i < numThreads; i++) { threadPool.submit(new Runnable() { @Override public void run() { allExecutorThreadsReady.countDown(); try { startBlocker.await(); final FileSystem fs = cluster.getFileSystem(); fs.mkdirs(new Path("/testStatisticsParallelChild")); } catch (Throwable t) { LOG.error("Child failed when calling mkdir", t); childError.compareAndSet(null, t); } finally { allDone.countDown(); } } }); } final long oldMkdirOpCount = getOpStatistics(OpType.MKDIRS); // wait until all threads are ready allExecutorThreadsReady.await(); // all threads start making directories startBlocker.countDown(); // wait until all threads are done allDone.await(); assertNull(childError.get(), "Child failed with exception " + childError.get()); checkStatistics(fs, 0, numThreads, 0); // check the single operation count stat checkOpStatistics(OpType.MKDIRS, numThreads + oldMkdirOpCount); // iterate all the operation counts for (Iterator<LongStatistic> opCountIter = FileSystem.getGlobalStorageStatistics() .get(DFSOpsCountStatistics.NAME).getLongStatistics(); opCountIter.hasNext();) { final LongStatistic opCount = opCountIter.next(); if (OpType.MKDIRS.getSymbol().equals(opCount.getName())) { assertEquals(numThreads + oldMkdirOpCount, opCount.getValue(), "Unexpected op count from iterator!"); } LOG.info(opCount.getName() + "\t" + 
opCount.getValue()); } } finally { threadPool.shutdownNow(); cluster.shutdown(); } } /** Checks statistics. -1 indicates do not check for the operations */ public static void checkStatistics(FileSystem fs, int readOps, int writeOps, int largeReadOps) { assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps()); assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps()); assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps()); } /** Checks read statistics. */ private void checkReadStatistics(FileSystem fs, int distance, long expectedReadBytes) { long bytesRead = DFSTestUtil.getStatistics(fs). getBytesReadByDistance(distance); assertEquals(expectedReadBytes, bytesRead); } @Test public void testLocalHostReadStatistics() throws Exception { testReadFileSystemStatistics(0, false, false); } @Test public void testLocalRackReadStatistics() throws Exception { testReadFileSystemStatistics(2, false, false); } @Test public void testRemoteRackOfFirstDegreeReadStatistics() throws Exception { testReadFileSystemStatistics(4, false, false); } @Test public void testInvalidScriptMappingFileReadStatistics() throws Exception { // Even though network location of the client machine is unknown, // MiniDFSCluster's datanode is on the local host and thus the network // distance is 0. testReadFileSystemStatistics(0, true, true); } @Test public void testEmptyScriptMappingFileReadStatistics() throws Exception { // Network location of the client machine is resolved to // {@link NetworkTopology#DEFAULT_RACK} when there is no script file // defined. This is equivalent to unknown network location. // MiniDFSCluster's datanode is on the local host and thus the network // distance is 0. testReadFileSystemStatistics(0, true, false); } /** expectedDistance is the expected distance between client and dn. * 0 means local host. * 2 means same rack. * 4 means remote rack of first degree. 
* invalidScriptMappingConfig is used to test */ private void testReadFileSystemStatistics(int expectedDistance, boolean useScriptMapping, boolean invalidScriptMappingFile) throws IOException { MiniDFSCluster cluster = null; StaticMapping.addNodeToRack(NetUtils.getLocalHostname(), "/rackClient"); final Configuration conf = getTestConfiguration(); conf.setBoolean(FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED, true); // ClientContext is cached globally by default thus we will end up using // the network distance computed by other test cases. // Use different value for DFS_CLIENT_CONTEXT in each test case so that it // can compute network distance independently. conf.set(DFS_CLIENT_CONTEXT, "testContext_" + expectedDistance); // create a cluster with a dn with the expected distance. // MiniDFSCluster by default uses StaticMapping unless the test // overrides it. if (useScriptMapping) { conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, ScriptBasedMapping.class, DNSToSwitchMapping.class); if (invalidScriptMappingFile) { conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "invalidScriptFile.txt"); } cluster = new MiniDFSCluster.Builder(conf). useConfiguredTopologyMappingClass(true).build(); } else if (expectedDistance == 0) { cluster = new MiniDFSCluster.Builder(conf). hosts(new String[] {NetUtils.getLocalHostname()}).build(); } else if (expectedDistance == 2) { cluster = new MiniDFSCluster.Builder(conf). racks(new String[]{"/rackClient"}).build(); } else if (expectedDistance == 4) { cluster = new MiniDFSCluster.Builder(conf). 
racks(new String[]{"/rackFoo"}).build(); } // create a file, read the file and verify the metrics try { final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.getStatistics(fs).reset(); Path dir = new Path("/test"); Path file = new Path(dir, "file"); String input = "hello world"; DFSTestUtil.writeFile(fs, file, input); FSDataInputStream stm = fs.open(file); byte[] actual = new byte[4096]; stm.read(actual); checkReadStatistics(fs, expectedDistance, input.length()); } finally { if (cluster != null) cluster.shutdown(); } } public static void checkOpStatistics(OpType op, long count) { assertEquals(count, getOpStatistics(op), "Op " + op.getSymbol() + " has unexpected count!"); } public static long getOpStatistics(OpType op) { return GlobalStorageStatistics.INSTANCE.get( DFSOpsCountStatistics.NAME) .getLong(op.getSymbol()); } @Test public void testFileChecksum() throws Exception { final long seed = RAN.nextLong(); System.out.println("seed=" + seed); RAN.setSeed(seed); final Configuration conf = getTestConfiguration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(2).build(); final FileSystem hdfs = cluster.getFileSystem(); final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final UserGroupInformation current = UserGroupInformation.getCurrentUser(); final UserGroupInformation ugi = UserGroupInformation.createUserForTesting( current.getShortUserName() + "x", new String[]{"user"}); try { hdfs.getFileChecksum(new Path( "/test/TestNonExistingFile")); fail("Expecting FileNotFoundException"); } catch (FileNotFoundException e) { assertTrue(e.getMessage().contains("File does not exist: /test/TestNonExistingFile"), "Not throwing the intended exception message"); } try { Path path = new Path("/test/TestExistingDir/"); hdfs.mkdirs(path); hdfs.getFileChecksum(path); fail("Expecting FileNotFoundException"); } catch (FileNotFoundException e) { assertTrue(e.getMessage().contains("Path is not a file: /test/TestExistingDir"), 
"Not throwing the intended exception message"); } //webhdfs final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs = ugi.doAs( new PrivilegedExceptionAction<FileSystem>() { @Override public FileSystem run() throws Exception { return new Path(webhdfsuri).getFileSystem(conf); } }); final Path dir = new Path("/filechecksum"); final int block_size = 1024; final int buffer_size = conf.getInt( CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512); //try different number of blocks for(int n = 0; n < 5; n++) { //generate random data final byte[] data = new byte[RAN.nextInt(block_size/2-1)+n*block_size+1]; RAN.nextBytes(data); System.out.println("data.length=" + data.length); //write data to a file final Path foo = new Path(dir, "foo" + n); { final FSDataOutputStream out = hdfs.create(foo, false, buffer_size, (short)2, block_size); out.write(data); out.close(); } //compute checksum final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo); System.out.println("hdfsfoocs=" + hdfsfoocs); //webhdfs final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo); System.out.println("webhdfsfoocs=" + webhdfsfoocs); final Path webhdfsqualified = new Path(webhdfsuri + dir, "foo" + n); final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified); System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs); //create a zero byte file final Path zeroByteFile = new Path(dir, "zeroByteFile" + n); { final FSDataOutputStream out = hdfs.create(zeroByteFile, false, buffer_size, (short)2, block_size); out.close(); } //write another file final Path bar = new Path(dir, "bar" + n); { final FSDataOutputStream out = hdfs.create(bar, false, buffer_size, (short)2, block_size); out.write(data); out.close(); } { final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile); final String magicValue = 
"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51"; // verify the magic val for zero byte files assertEquals(magicValue, zeroChecksum.toString()); //verify checksums for empty file and 0 request length final FileChecksum checksumWith0 = hdfs.getFileChecksum(bar, 0); assertEquals(zeroChecksum, checksumWith0); //verify checksum final FileChecksum barcs = hdfs.getFileChecksum(bar); final int barhashcode = barcs.hashCode(); assertEquals(hdfsfoocs.hashCode(), barhashcode); assertEquals(hdfsfoocs, barcs); //webhdfs assertEquals(webhdfsfoocs.hashCode(), barhashcode); assertEquals(webhdfsfoocs, barcs); assertEquals(webhdfs_qfoocs.hashCode(), barhashcode); assertEquals(webhdfs_qfoocs, barcs); } hdfs.setPermission(dir, new FsPermission((short)0)); { //test permission error on webhdfs try { webhdfs.getFileChecksum(webhdfsqualified); fail(); } catch(IOException ioe) { FileSystem.LOG.info("GOOD: getting an exception", ioe); } } hdfs.setPermission(dir, new FsPermission((short)0777)); } cluster.shutdown(); } @Test public void testAllWithDualPort() throws Exception { dualPortTesting = true; try { testFileSystemCloseAll(); testDFSClose(); testDFSClient(); testFileChecksum(); } finally { dualPortTesting = false; } } @Test public void testAllWithNoXmlDefaults() throws Exception { // Do all the tests with a configuration that ignores the defaults in // the XML files. 
noXmlDefaults = true; try { testFileSystemCloseAll(); testDFSClose(); testDFSClient(); testFileChecksum(); } finally { noXmlDefaults = false; } } @Test @Timeout(value = 120) public void testLocatedFileStatusStorageIdsTypes() throws Exception { final Configuration conf = getTestConfiguration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(3).build(); try { final DistributedFileSystem fs = cluster.getFileSystem(); final Path testFile = new Path("/testListLocatedStatus"); final int blockSize = 4096; final int numBlocks = 10; // Create a test file final int repl = 2; DFSTestUtil.createFile(fs, testFile, blockSize, numBlocks * blockSize, blockSize, (short) repl, 0xADDED); DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000); // Get the listing RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(testFile); assertTrue(it.hasNext(), "Expected file to be present"); LocatedFileStatus stat = it.next(); BlockLocation[] locs = stat.getBlockLocations(); assertEquals(numBlocks, locs.length, "Unexpected number of locations"); Set<String> dnStorageIds = new HashSet<>(); for (DataNode d : cluster.getDataNodes()) { try (FsDatasetSpi.FsVolumeReferences volumes = d.getFSDataset() .getFsVolumeReferences()) { for (FsVolumeSpi vol : volumes) { dnStorageIds.add(vol.getStorageID()); } } } for (BlockLocation loc : locs) { String[] ids = loc.getStorageIds(); // Run it through a set to deduplicate, since there should be no dupes Set<String> storageIds = new HashSet<>(); Collections.addAll(storageIds, ids); assertEquals(repl, storageIds.size(), "Unexpected num storage ids"); // Make sure these are all valid storage IDs assertTrue(dnStorageIds.containsAll(storageIds), "Unknown storage IDs found!"); // Check storage types are the default, since we didn't set any StorageType[] types = loc.getStorageTypes(); assertEquals(repl, types.length, "Unexpected num storage types"); for (StorageType t: types) { assertEquals(StorageType.DEFAULT, t, 
"Unexpected storage type"); } } } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testCreateWithCustomChecksum() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; Path testBasePath = new Path("/test/csum"); // create args Path path1 = new Path(testBasePath, "file_wtih_crc1"); Path path2 = new Path(testBasePath, "file_with_crc2"); ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512); ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512); // common args FsPermission perm = FsPermission.getDefault().applyUMask( FsPermission.getUMask(conf)); EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE, CreateFlag.CREATE); short repl = 1; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); FileSystem dfs = cluster.getFileSystem(); dfs.mkdirs(testBasePath); // create two files with different checksum types FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl, 131072L, null, opt1); FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl, 131072L, null, opt2); for (int i = 0; i < 1024; i++) { out1.write(i); out2.write(i); } out1.close(); out2.close(); // the two checksums must be different. MD5MD5CRC32FileChecksum sum1 = (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1); MD5MD5CRC32FileChecksum sum2 = (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2); assertFalse(sum1.equals(sum2)); // check the individual params assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType()); assertEquals(DataChecksum.Type.CRC32, sum2.getCrcType()); } finally { if (cluster != null) { cluster.getFileSystem().delete(testBasePath, true); cluster.shutdown(); } } } @Test @Timeout(value = 60) public void testFileCloseStatus() throws IOException { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); DistributedFileSystem fs = cluster.getFileSystem(); try { // create a new file. 
Path file = new Path("/simpleFlush.dat"); FSDataOutputStream output = fs.create(file); // write to file output.writeBytes("Some test data"); output.flush(); assertFalse(fs.isFileClosed(file), "File status should be open"); output.close(); assertTrue(fs.isFileClosed(file), "File status should be closed"); } finally { cluster.shutdown(); } } @Test public void testCreateWithStoragePolicy() throws Throwable { Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .storageTypes( new StorageType[] {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}).storagesPerDatanode(3).build()) { DistributedFileSystem fs = cluster.getFileSystem(); Path file1 = new Path("/tmp/file1"); Path file2 = new Path("/tmp/file2"); fs.mkdirs(new Path("/tmp")); fs.setStoragePolicy(new Path("/tmp"), "ALL_SSD"); FSDataOutputStream outputStream = fs.createFile(file1) .storagePolicyName("COLD").build(); outputStream.write(1); outputStream.close(); assertEquals(StorageType.ARCHIVE, DFSTestUtil.getAllBlocks(fs, file1) .get(0).getStorageTypes()[0]); assertEquals(fs.getStoragePolicy(file1).getName(), "COLD"); // Check with storage policy not specified. outputStream = fs.createFile(file2).build(); outputStream.write(1); outputStream.close(); assertEquals(StorageType.SSD, DFSTestUtil.getAllBlocks(fs, file2).get(0) .getStorageTypes()[0]); assertEquals(fs.getStoragePolicy(file2).getName(), "ALL_SSD"); // Check with default storage policy. 
// [chunk] End of testCreateWithStoragePolicy (default policy: DISK + "HOT"), then
// testListFiles (recursive listFiles over a relative path resolves against the
// working directory), then the first half of testListFilesRecursive, which mocks
// listLocatedStatus to throw FNF for one subdirectory and expects listFiles to
// skip it (3 of 4 files returned). Continues in the next chunk.
// NOTE(review): several assertEquals calls here pass (actual, expected) — e.g.
// assertEquals(fs.getStoragePolicy(...).getName(), "HOT") — reversed from the
// JUnit (expected, actual) convention; harmless for pass/fail but failure
// messages will be swapped. Worth normalizing in a follow-up.
outputStream = fs.createFile(new Path("/default")).build(); outputStream.write(1); outputStream.close(); assertEquals(StorageType.DISK, DFSTestUtil.getAllBlocks(fs, new Path("/default")).get(0) .getStorageTypes()[0]); assertEquals(fs.getStoragePolicy(new Path("/default")).getName(), "HOT"); } } @Test @Timeout(value = 60) public void testListFiles() throws IOException { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { DistributedFileSystem fs = cluster.getFileSystem(); final Path relative = new Path("relative"); fs.create(new Path(relative, "foo")).close(); final List<LocatedFileStatus> retVal = new ArrayList<>(); final RemoteIterator<LocatedFileStatus> iter = fs.listFiles(relative, true); while (iter.hasNext()) { retVal.add(iter.next()); } System.out.println("retVal = " + retVal); } finally { cluster.shutdown(); } } @Test public void testListFilesRecursive() throws IOException { Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();) { DistributedFileSystem fs = cluster.getFileSystem(); // Create some directories and files. Path dir = new Path("/dir"); Path subDir1 = fs.makeQualified(new Path(dir, "subDir1")); Path subDir2 = fs.makeQualified(new Path(dir, "subDir2")); fs.create(new Path(dir, "foo1")).close(); fs.create(new Path(dir, "foo2")).close(); fs.create(new Path(subDir1, "foo3")).close(); fs.create(new Path(subDir2, "foo4")).close(); // Mock the filesystem, and throw FNF when listing is triggered for the subdirectory. FileSystem mockFs = spy(fs); Mockito.doThrow(new FileNotFoundException("")).when(mockFs).listLocatedStatus(eq(subDir1)); List<LocatedFileStatus> str = RemoteIterators.toList(mockFs.listFiles(dir, true)); assertThat(str).hasSize(3); // Mock the filesystem to depict a scenario where the directory got deleted and a file // got created with the same name. 
// [chunk] End of testListFilesRecursive: the dir is re-mocked as a same-named FILE
// (via getMockedIterator) and listFiles must now yield 4 entries. getMockedIterator
// is a single-use RemoteIterator returning one LocatedFileStatus posing as a file.
// Then testListStatusOfSnapshotDirs (isSnapshotEnabled true only on the dir where
// allowSnapshot was called) and the opening of testDFSClientPeerReadTimeout, which
// connects a peer to a dummy ServerSocket that never responds. Cut at "} catch" —
// the catch clause is in the next chunk.
// NOTE(review): the ServerSocket and Peer in the timeout test are never closed;
// acceptable in a short-lived test JVM but a try-with-resources would be cleaner.
Mockito.doReturn(getMockedIterator(subDir1)).when(mockFs).listLocatedStatus(eq(subDir1)); str = RemoteIterators.toList(mockFs.listFiles(dir, true)); assertThat(str).hasSize(4); } } private static RemoteIterator<LocatedFileStatus> getMockedIterator(Path subDir1) { return new RemoteIterator<LocatedFileStatus>() { private int remainingEntries = 1; @Override public boolean hasNext() throws IOException { return remainingEntries > 0; } @Override public LocatedFileStatus next() throws IOException { remainingEntries--; return new LocatedFileStatus(0, false, 1, 1024, 0L, 0, null, null, null, null, subDir1, false, false, false, null); } }; } @Test public void testListStatusOfSnapshotDirs() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(getTestConfiguration()) .build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); dfs.create(new Path("/parent/test1/dfsclose/file-0")); Path snapShotDir = new Path("/parent/test1/"); dfs.allowSnapshot(snapShotDir); FileStatus status = dfs.getFileStatus(new Path("/parent/test1")); assertTrue(status.isSnapshotEnabled()); status = dfs.getFileStatus(new Path("/parent/")); assertFalse(status.isSnapshotEnabled()); } finally { cluster.shutdown(); } } @Test @Timeout(value = 10) public void testDFSClientPeerReadTimeout() throws IOException { final int timeout = 1000; final Configuration conf = getTestConfiguration(); conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); // use a dummy socket to ensure the read timesout ServerSocket socket = new ServerSocket(0); Peer peer = dfs.getClient().newConnectedPeer( (InetSocketAddress) socket.getLocalSocketAddress(), null, null); long start = Time.now(); try { peer.getInputStream().read(); fail("read should timeout"); } catch 
// [chunk] Catch clause completing testDFSClientPeerReadTimeout: the SocketTimeoutException
// must arrive within [0.9x, 1.1x] of the configured timeout. Then testGetServerDefaults
// (non-null server defaults) and testDFSClientPeerWriteTimeout: writing 10 MB to a
// never-read dummy socket must time out, tolerance [0.9x, 1.2x].
// NOTE(review): the read test uses an upper bound of 1.1x while the write test uses
// 1.2x — presumably intentional slack for the larger write, but worth confirming.
// The trailing fragment opens testTotalDfsUsed (body in the next chunk).
(SocketTimeoutException ste) { long delta = Time.now() - start; if (delta < timeout*0.9) { throw new IOException("read timedout too soon in " + delta + " ms.", ste); } if (delta > timeout*1.1) { throw new IOException("read timedout too late in " + delta + " ms.", ste); } } } finally { cluster.shutdown(); } } @Test @Timeout(value = 60) public void testGetServerDefaults() throws IOException { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); FsServerDefaults fsServerDefaults = dfs.getServerDefaults(); assertNotNull(fsServerDefaults); } finally { cluster.shutdown(); } } @Test @Timeout(value = 10) public void testDFSClientPeerWriteTimeout() throws IOException { final int timeout = 1000; final Configuration conf = getTestConfiguration(); conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); // Write 10 MB to a dummy socket to ensure the write times out ServerSocket socket = new ServerSocket(0); Peer peer = dfs.getClient().newConnectedPeer( (InetSocketAddress) socket.getLocalSocketAddress(), null, null); long start = Time.now(); try { byte[] buf = new byte[10 * 1024 * 1024]; peer.getOutputStream().write(buf); long delta = Time.now() - start; fail("write finish in " + delta + " ms" + "but should timedout"); } catch (SocketTimeoutException ste) { long delta = Time.now() - start; if (delta < timeout * 0.9) { throw new IOException("write timedout too soon in " + delta + " ms.", ste); } if (delta > timeout * 1.2) { throw new IOException("write timedout too late in " + delta + " ms.", ste); } } } finally { cluster.shutdown(); } } @Test @Timeout(value = 30) public void testTotalDfsUsed() throws Exception { 
// [chunk] Body of testTotalDfsUsed: two 2-byte files ("hi"), one at the root and one
// under /Folder1; getUsed(path) must return 2 for the folder and getUsed() 4 overall.
// NOTE(review): "hi".getBytes() uses the platform default charset; for ASCII "hi" the
// length is 2 on any charset, so the assertion is safe, but StandardCharsets.UTF_8
// would make that explicit.
// Trailing fragment opens testDFSCloseFilesBeingWritten: create-then-delete a file so
// closing it later hits FileNotFoundException; continues in the next chunk.
Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); FileSystem fs = cluster.getFileSystem(); // create file under root FSDataOutputStream File1 = fs.create(new Path("/File1")); File1.write("hi".getBytes()); File1.close(); // create file under sub-folder FSDataOutputStream File2 = fs.create(new Path("/Folder1/File2")); File2.write("hi".getBytes()); File2.close(); // getUsed(Path) should return total len of all the files from a path assertEquals(2, fs.getUsed(new Path("/Folder1"))); //getUsed() should return total length of all files in filesystem assertEquals(4, fs.getUsed()); } finally { if (cluster != null) { cluster.shutdown(); cluster = null; } } } @Test public void testDFSCloseFilesBeingWritten() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); DistributedFileSystem fileSys = cluster.getFileSystem(); // Create one file then delete it to trigger the FileNotFoundException // when closing the file. fileSys.create(new Path("/test/dfsclose/file-0")); fileSys.delete(new Path("/test/dfsclose/file-0"), true); DFSClient dfsClient = fileSys.getClient(); // Construct a new dfsClient to get the same LeaseRenewer instance, // to avoid the original client being added to the leaseRenewer again. 
// [chunk] End of testDFSCloseFilesBeingWritten: closeAllFilesBeingWritten(false) must
// leave both the LeaseRenewer's client list and the files-being-written map empty.
// Then testBuilderSetters — pure builder-flag verification (APPEND/CREATE/NEW_BLOCK/
// NO_LOCAL_WRITE/NO_LOCAL_RACK set, OVERWRITE/SYNC_BLOCK clear, ec-policy name kept,
// replicate off) — and its driver testHdfsDataOutputStreamBuilderSetParameters.
// Trailing fragment opens testDFSDataOutputStreamBuilderForCreation (next chunk).
DFSClient newDfsClient = new DFSClient(cluster.getFileSystem(0).getUri(), conf); LeaseRenewer leaseRenewer = newDfsClient.getLeaseRenewer(); dfsClient.closeAllFilesBeingWritten(false); // Remove new dfsClient in leaseRenewer leaseRenewer.closeClient(newDfsClient); // The list of clients corresponding to this renewer should be empty assertEquals(true, leaseRenewer.isEmpty()); assertEquals(true, dfsClient.isFilesBeingWrittenEmpty()); } finally { if (cluster != null) { cluster.shutdown(); } } } private void testBuilderSetters(DistributedFileSystem fs) { Path testFilePath = new Path("/testBuilderSetters"); HdfsDataOutputStreamBuilder builder = fs.createFile(testFilePath); builder.append().overwrite(false).newBlock().lazyPersist().noLocalWrite() .ecPolicyName("ec-policy").noLocalRack(); EnumSet<CreateFlag> flags = builder.getFlags(); assertTrue(flags.contains(CreateFlag.APPEND)); assertTrue(flags.contains(CreateFlag.CREATE)); assertTrue(flags.contains(CreateFlag.NEW_BLOCK)); assertTrue(flags.contains(CreateFlag.NO_LOCAL_WRITE)); assertFalse(flags.contains(CreateFlag.OVERWRITE)); assertFalse(flags.contains(CreateFlag.SYNC_BLOCK)); assertTrue(flags.contains(CreateFlag.NO_LOCAL_RACK)); assertEquals("ec-policy", builder.getEcPolicyName()); assertFalse(builder.shouldReplicate()); } @Test public void testHdfsDataOutputStreamBuilderSetParameters() throws IOException { Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1).build()) { cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); testBuilderSetters(fs); } } @Test public void testDFSDataOutputStreamBuilderForCreation() throws Exception { Configuration conf = getTestConfiguration(); String testFile = "/testDFSDataOutputStreamBuilder"; Path testFilePath = new Path(testFile); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1).build()) { DistributedFileSystem fs = cluster.getFileSystem(); // Before calling 
// [chunk] Middle of testDFSDataOutputStreamBuilderForCreation: a builder that is
// configured but never built must not touch the filesystem (the local 'builder' is
// deliberately unused after construction — that is the assertion's point); then an
// empty-file create, a content create verified via ContractTestUtils, an
// overwrite(false) create that must throw FileAlreadyExistsException, and a create
// under a missing parent that must throw FileNotFoundException. Continues next chunk.
build(), no change was made in the file system HdfsDataOutputStreamBuilder builder = fs.createFile(testFilePath) .blockSize(4096).replication((short)1); assertFalse(fs.exists(testFilePath)); // Test create an empty file try (FSDataOutputStream out = fs.createFile(testFilePath).build()) { LOG.info("Test create an empty file"); } // Test create a file with content, and verify the content String content = "This is a test!"; try (FSDataOutputStream out1 = fs.createFile(testFilePath) .bufferSize(4096) .replication((short) 1) .blockSize(4096) .build()) { byte[] contentOrigin = content.getBytes(StandardCharsets.UTF_8); out1.write(contentOrigin); } ContractTestUtils.verifyFileContents(fs, testFilePath, content.getBytes()); try (FSDataOutputStream out = fs.createFile(testFilePath).overwrite(false) .build()) { fail("it should fail to overwrite an existing file"); } catch (FileAlreadyExistsException e) { // As expected, ignore. } Path nonParentFile = new Path("/parent/test"); try (FSDataOutputStream out = fs.createFile(nonParentFile).build()) { fail("parent directory not exist"); } catch (FileNotFoundException e) { // As expected. 
// [chunk] End of testDFSDataOutputStreamBuilderForCreation: the failed non-recursive
// create must not leave /parent behind, while recursive() must create it. Then
// testDFSDataOutputStreamBuilderForAppend: appendFile on a missing path throws
// (message contains "non-existent"); after create+append of 16 random bytes each,
// file length is 32. Trailing fragment opens testSuperUserPrivilege with a
// JavaKeyStoreProvider-backed key provider (continues in the next chunk).
} assertFalse(fs.exists(new Path("/parent")), "parent directory should not be created"); try (FSDataOutputStream out = fs.createFile(nonParentFile).recursive() .build()) { out.write(1); } assertTrue(fs.exists(new Path("/parent")), "parent directory has not been created"); } } @Test public void testDFSDataOutputStreamBuilderForAppend() throws IOException { Configuration conf = getTestConfiguration(); String testFile = "/testDFSDataOutputStreamBuilderForAppend"; Path path = new Path(testFile); Random random = new Random(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1).build()) { DistributedFileSystem fs = cluster.getFileSystem(); byte[] buf = new byte[16]; random.nextBytes(buf); try (FSDataOutputStream out = fs.appendFile(path).build()) { out.write(buf); fail("should fail on appending to non-existent file"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("non-existent", e); } random.nextBytes(buf); try (FSDataOutputStream out = fs.createFile(path).build()) { out.write(buf); } random.nextBytes(buf); try (FSDataOutputStream out = fs.appendFile(path).build()) { out.write(buf); } FileStatus status = fs.getFileStatus(path); assertEquals(16 * 2, status.getLen()); } } @Test public void testSuperUserPrivilege() throws Exception { HdfsConfiguration conf = getTestConfiguration(); File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); Path dir = new Path("/testPrivilege"); dfs.mkdirs(dir); final KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); provider.createKey("key", 
// [chunk] Body of testSuperUserPrivilege: as a non-super user, createEncryptionZone,
// listEncryptionZones, listReencryptionStatus and rollEditLog must all be rejected
// with "Superuser privilege is required" (the listing calls surface it as a
// RemoteException wrapping AccessControlException). Trailing fragment opens
// testListingStoragePolicyNonSuperUser (continues in the next chunk).
options); provider.flush(); // Create a non-super user. UserGroupInformation user = UserGroupInformation.createUserForTesting( "Non_SuperUser", new String[] {"Non_SuperGroup"}); DistributedFileSystem userfs = (DistributedFileSystem) user.doAs( (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf)); LambdaTestUtils.intercept(AccessControlException.class, "Superuser privilege is required", () -> userfs.createEncryptionZone(dir, "key")); RemoteException re = LambdaTestUtils.intercept(RemoteException.class, "Superuser privilege is required", () -> userfs.listEncryptionZones().hasNext()); assertTrue(re.unwrapRemoteException() instanceof AccessControlException); re = LambdaTestUtils.intercept(RemoteException.class, "Superuser privilege is required", () -> userfs.listReencryptionStatus().hasNext()); assertTrue(re.unwrapRemoteException() instanceof AccessControlException); LambdaTestUtils.intercept(AccessControlException.class, "Superuser privilege is required", () -> user.doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { cluster.getNameNode().getRpcServer().rollEditLog(); return null; } })); } } @Test public void testListingStoragePolicyNonSuperUser() throws Exception { HdfsConfiguration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); Path dir = new Path("/dir"); dfs.mkdirs(dir); dfs.setPermission(dir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); // Create a non-super user. 
// [chunk] End of testListingStoragePolicyNonSuperUser: a non-super user sets COLD on
// its own subdir and listPaths must report COLD_STORAGE_POLICY_ID for it. Then the
// first half of testRemoveErasureCodingPolicy: add a custom rs-3-2 policy, remove it,
// verify it appears in getRemovedPolicies, and start verifying that a non-privileged
// user's removeErasureCodingPolicy is denied (message split across the chunk break).
UserGroupInformation user = UserGroupInformation.createUserForTesting( "Non_SuperUser", new String[] {"Non_SuperGroup"}); DistributedFileSystem userfs = (DistributedFileSystem) user.doAs( (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf)); Path sDir = new Path("/dir/sPolicy"); userfs.mkdirs(sDir); userfs.setStoragePolicy(sDir, "COLD"); HdfsFileStatus[] list = userfs.getClient() .listPaths(dir.toString(), HdfsFileStatus.EMPTY_NAME) .getPartialListing(); assertEquals(HdfsConstants.COLD_STORAGE_POLICY_ID, list[0].getStoragePolicy()); } } @Test public void testRemoveErasureCodingPolicy() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); DistributedFileSystem fs = cluster.getFileSystem(); ECSchema toAddSchema = new ECSchema("rs", 3, 2); ErasureCodingPolicy toAddPolicy = new ErasureCodingPolicy(toAddSchema, 128 * 1024, (byte) 254); String policyName = toAddPolicy.getName(); ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{toAddPolicy}; fs.addErasureCodingPolicies(policies); assertEquals(policyName, ErasureCodingPolicyManager.getInstance(). getByName(policyName).getName()); fs.removeErasureCodingPolicy(policyName); assertEquals(policyName, ErasureCodingPolicyManager.getInstance(). getRemovedPolicies().get(0).getName()); // remove erasure coding policy as a user without privilege UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting( "ProbablyNotARealUserName", new String[] {"ShangriLa"}); final MiniDFSCluster finalCluster = cluster; fakeUGI.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws Exception { DistributedFileSystem fs = finalCluster.getFileSystem(); try { fs.removeErasureCodingPolicy(policyName); fail(); } catch (AccessControlException ace) { GenericTestUtils.assertExceptionContains("Access denied for user " + "ProbablyNotARealUserName. 
// [chunk] Completes the access-denied assertion of testRemoveErasureCodingPolicy,
// then the first half of testEnableAndDisableErasureCodingPolicy: add a custom
// rs-3-2 policy, enable it (visible via getEnabledPolicyByName), disable it
// (getEnabledPolicyByName returns null — assertion completes in the next chunk).
Superuser privilege is required", ace); } return null; } }); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testEnableAndDisableErasureCodingPolicy() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); DistributedFileSystem fs = cluster.getFileSystem(); ECSchema toAddSchema = new ECSchema("rs", 3, 2); ErasureCodingPolicy toAddPolicy = new ErasureCodingPolicy(toAddSchema, 128 * 1024, (byte) 254); String policyName = toAddPolicy.getName(); ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{toAddPolicy}; fs.addErasureCodingPolicies(policies); assertEquals(policyName, ErasureCodingPolicyManager.getInstance(). getByName(policyName).getName()); fs.enableErasureCodingPolicy(policyName); assertEquals(policyName, ErasureCodingPolicyManager.getInstance(). getEnabledPolicyByName(policyName).getName()); fs.disableErasureCodingPolicy(policyName); assertNull(ErasureCodingPolicyManager.getInstance(). 
// [chunk] Middle of testEnableAndDisableErasureCodingPolicy: enabling/disabling a
// nonexistent policy must fail with "does not exist"; then a non-privileged user's
// disable/enable must both be denied with "Superuser privilege is required"
// (message string split across the chunk break into the next chunk).
getEnabledPolicyByName(policyName)); //test enable a policy that doesn't exist try { fs.enableErasureCodingPolicy("notExistECName"); fail("enable the policy that doesn't exist should fail"); } catch (Exception e) { GenericTestUtils.assertExceptionContains("does not exist", e); // pass } //test disable a policy that doesn't exist try { fs.disableErasureCodingPolicy("notExistECName"); fail("disable the policy that doesn't exist should fail"); } catch (Exception e) { GenericTestUtils.assertExceptionContains("does not exist", e); // pass } // disable and enable erasure coding policy as a user without privilege UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting( "ProbablyNotARealUserName", new String[] {"ShangriLa"}); final MiniDFSCluster finalCluster = cluster; fakeUGI.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws Exception { DistributedFileSystem fs = finalCluster.getFileSystem(); try { fs.disableErasureCodingPolicy(policyName); fail(); } catch (AccessControlException ace) { GenericTestUtils.assertExceptionContains("Access denied for user " + "ProbablyNotARealUserName. Superuser privilege is required", ace); } try { fs.enableErasureCodingPolicy(policyName); fail(); } catch (AccessControlException ace) { GenericTestUtils.assertExceptionContains("Access denied for user " + "ProbablyNotARealUserName. 
// [chunk] Closes testEnableAndDisableErasureCodingPolicy, then testStorageFavouredNodes:
// with ONE_SSD policy and a favoured-node hint pointing at datanode 0, exactly one
// replica of the block must land on SSD storage. Trailing fragment opens
// testGetECTopologyResultForPolicies (continues in the next chunk).
Superuser privilege is required", ace); } return null; } }); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testStorageFavouredNodes() throws IOException, InterruptedException, TimeoutException { Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .storageTypes(new StorageType[] {StorageType.SSD, StorageType.DISK}) .numDataNodes(3).storagesPerDatanode(2).build()) { DistributedFileSystem fs = cluster.getFileSystem(); Path file1 = new Path("/tmp/file1"); fs.mkdirs(new Path("/tmp")); fs.setStoragePolicy(new Path("/tmp"), "ONE_SSD"); InetSocketAddress[] addrs = {cluster.getDataNodes().get(0).getXferAddress()}; HdfsDataOutputStream stream = fs.create(file1, FsPermission.getDefault(), false, 1024, (short) 3, 1024, null, addrs); stream.write("Some Bytes".getBytes()); stream.close(); DFSTestUtil.waitReplication(fs, file1, (short) 3); BlockLocation[] locations = fs.getClient() .getBlockLocations(file1.toUri().getPath(), 0, Long.MAX_VALUE); int numSSD = Collections.frequency( Arrays.asList(locations[0].getStorageTypes()), StorageType.SSD); assertEquals(1, numSSD, "Number of SSD should be 1 but was : " + numSSD); } } @Test public void testGetECTopologyResultForPolicies() throws Exception { Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0)) { DistributedFileSystem dfs = cluster.getFileSystem(); dfs.enableErasureCodingPolicy("RS-6-3-1024k"); // No policies specified should return result for the enabled policy. ECTopologyVerifierResult result = dfs.getECTopologyResultForPolicies(); assertTrue(result.isSupported()); // Specified policy requiring more datanodes than present in // the actual cluster. 
// [chunk] End of testGetECTopologyResultForPolicies: on a 9-datanode cluster,
// RS-10-4 is unsupported, XOR-2-1 + RS-3-2 are supported, any list containing
// RS-10-4 is unsupported, and after *enabling* RS-10-4 the no-argument query also
// reports unsupported. Then the first half of testECCloseCommittedBlock: with
// FILE_CLOSE_NUM_COMMITTED_ALLOWED=1 and XOR-2-1, the first file closes despite
// paused incremental block reports (continues in the next chunk).
result = dfs.getECTopologyResultForPolicies("RS-10-4-1024k"); assertFalse(result.isSupported()); // Specify multiple policies that require datanodes equlal or less then // present in the actual cluster result = dfs.getECTopologyResultForPolicies("XOR-2-1-1024k", "RS-3-2-1024k"); assertTrue(result.isSupported()); // Specify multiple policies with one policy requiring more datanodes than // present in the actual cluster result = dfs.getECTopologyResultForPolicies("RS-10-4-1024k", "RS-3-2-1024k"); assertFalse(result.isSupported()); // Enable a policy requiring more datanodes than present in // the actual cluster. dfs.enableErasureCodingPolicy("RS-10-4-1024k"); result = dfs.getECTopologyResultForPolicies(); assertFalse(result.isSupported()); } } @Test public void testECCloseCommittedBlock() throws Exception { HdfsConfiguration conf = getTestConfiguration(); conf.setInt(DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY, 1); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(3).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); Path dir = new Path("/dir"); dfs.mkdirs(dir); dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k"); try (FSDataOutputStream str = dfs.create(new Path("/dir/file"));) { for (int i = 0; i < 1024 * 1024 * 4; i++) { str.write(i); } DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(0)); DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(1)); } DataNodeTestUtils.resumeIBR(cluster.getDataNodes().get(0)); DataNodeTestUtils.resumeIBR(cluster.getDataNodes().get(1)); // Check if the blockgroup isn't complete then file close shouldn't be // success with block in committed state. 
// [chunk] End of testECCloseCommittedBlock: with one datanode down and IBRs paused on
// the remaining two, close() of the second EC file must throw IOException. Then the
// first half of testGetTrashRoot (dfs.namenode.snapshot.trashroot.enabled=true):
// before allowSnapshot, the trash root of a file resolves under the user's home
// directory. Continues in the next chunk with provisionSnapshotTrash.
cluster.getDataNodes().get(0).shutdown(); FSDataOutputStream str = dfs.create(new Path("/dir/file1")); for (int i = 0; i < 1024 * 1024 * 4; i++) { str.write(i); } DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(1)); DataNodeTestUtils.pauseIBR(cluster.getDataNodes().get(2)); LambdaTestUtils.intercept(IOException.class, "", () -> str.close()); } } @Test public void testGetTrashRoot() throws IOException { Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/ssgtr/test1/"); Path testDirTrashRoot = new Path(testDir, FileSystem.TRASH_PREFIX); Path file0path = new Path(testDir, "file-0"); dfs.create(file0path).close(); Path trBeforeAllowSnapshot = dfs.getTrashRoot(file0path); String trBeforeAllowSnapshotStr = trBeforeAllowSnapshot.toUri().getPath(); // The trash root should be in user home directory String homeDirStr = dfs.getHomeDirectory().toUri().getPath(); assertTrue(trBeforeAllowSnapshotStr.startsWith(homeDirStr)); dfs.allowSnapshot(testDir); // Provision trash root // Note: DFS#allowSnapshot doesn't auto create trash root. // Only HdfsAdmin#allowSnapshot creates trash root when // dfs.namenode.snapshot.trashroot.enabled is set to true on NameNode. 
// [chunk] Middle of testGetTrashRoot: after provisionSnapshotTrash, the trash root
// gets TRASH_PERMISSION and getTrashRoot resolves inside the snapshottable dir;
// a sibling dir whose name merely shares the prefix ("/ssgtr/test12/") must still
// resolve to the home-directory trash. Cleanup (disallowSnapshot + deletes) is in
// the next chunk.
dfs.provisionSnapshotTrash(testDir, TRASH_PERMISSION); // Expect trash root to be created with permission 777 and sticky bit FileStatus trashRootFileStatus = dfs.getFileStatus(testDirTrashRoot); assertEquals(TRASH_PERMISSION, trashRootFileStatus.getPermission()); Path trAfterAllowSnapshot = dfs.getTrashRoot(file0path); String trAfterAllowSnapshotStr = trAfterAllowSnapshot.toUri().getPath(); // The trash root should now be in the snapshot root String testDirStr = testDir.toUri().getPath(); assertTrue(trAfterAllowSnapshotStr.startsWith(testDirStr)); // test2Dir has the same prefix as testDir, but not snapshottable Path test2Dir = new Path("/ssgtr/test12/"); Path file1path = new Path(test2Dir, "file-1"); trAfterAllowSnapshot = dfs.getTrashRoot(file1path); trAfterAllowSnapshotStr = trAfterAllowSnapshot.toUri().getPath(); // The trash root should not be in the snapshot root assertFalse(trAfterAllowSnapshotStr.startsWith(testDirStr)); assertTrue(trBeforeAllowSnapshotStr.startsWith(homeDirStr)); // Cleanup // DFS#disallowSnapshot would remove empty trash root without throwing. 
// [chunk] Cleanup tail of testGetTrashRoot, the isPathInUserHome helper (prefix check
// against the current user's home dir), and the first half of testGetTrashRoots:
// one user-home trash root is reported; after allowSnapshot on a dir containing a
// per-user trash, getTrashRoots must report one more. Continues in the next chunk.
dfs.disallowSnapshot(testDir); dfs.delete(testDir, true); dfs.delete(test2Dir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } private boolean isPathInUserHome(String pathStr, DistributedFileSystem dfs) { String homeDirStr = dfs.getHomeDirectory().toUri().getPath(); return pathStr.startsWith(homeDirStr); } @Test public void testGetTrashRoots() throws IOException { Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/ssgtr/test1/"); Path file0path = new Path(testDir, "file-0"); dfs.create(file0path); // Create user trash Path currUserHome = dfs.getHomeDirectory(); Path currUserTrash = new Path(currUserHome, FileSystem.TRASH_PREFIX); dfs.mkdirs(currUserTrash); // Create trash inside test directory Path testDirTrash = new Path(testDir, FileSystem.TRASH_PREFIX); Path testDirTrashCurrUser = new Path(testDirTrash, UserGroupInformation.getCurrentUser().getShortUserName()); dfs.mkdirs(testDirTrashCurrUser); Collection<FileStatus> trashRoots = dfs.getTrashRoots(false); // getTrashRoots should only return 1 empty user trash in the home dir now assertEquals(1, trashRoots.size()); FileStatus firstFileStatus = trashRoots.iterator().next(); String pathStr = firstFileStatus.getPath().toUri().getPath(); assertTrue(isPathInUserHome(pathStr, dfs)); // allUsers should not make a difference for now because we have one user Collection<FileStatus> trashRootsAllUsers = dfs.getTrashRoots(true); assertEquals(trashRoots, trashRootsAllUsers); dfs.allowSnapshot(testDir); Collection<FileStatus> trashRootsAfter = dfs.getTrashRoots(false); // getTrashRoots should return 1 more trash root inside snapshottable dir assertEquals(trashRoots.size() + 1, trashRootsAfter.size()); boolean foundUserHomeTrash = false; boolean foundSnapDirUserTrash = false; String 
// [chunk] End of testGetTrashRoots: verifies both the home trash and the
// snapshottable-dir trash are reported; allUsers=true additionally picks up a new
// user's home trash and that user's trash inside the snapshottable dir. Trailing
// fragment opens testGetTrashRootsOnSnapshottableDirWithEZ (EZ key-provider setup
// continues in the next chunk).
testDirStr = testDir.toUri().getPath(); for (FileStatus fileStatus : trashRootsAfter) { String currPathStr = fileStatus.getPath().toUri().getPath(); if (isPathInUserHome(currPathStr, dfs)) { foundUserHomeTrash = true; } else if (currPathStr.startsWith(testDirStr)) { foundSnapDirUserTrash = true; } } assertTrue(foundUserHomeTrash); assertTrue(foundSnapDirUserTrash); // allUsers should not make a difference for now because we have one user Collection<FileStatus> trashRootsAfterAllUsers = dfs.getTrashRoots(true); assertEquals(trashRootsAfter, trashRootsAfterAllUsers); // Create trash root for user0 UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user0"); String user0HomeStr = DFSUtilClient.getHomeDirectory(conf, ugi); Path user0Trash = new Path(user0HomeStr, FileSystem.TRASH_PREFIX); dfs.mkdirs(user0Trash); // allUsers flag set to false should be unaffected Collection<FileStatus> trashRootsAfter2 = dfs.getTrashRoots(false); assertEquals(trashRootsAfter, trashRootsAfter2); // allUsers flag set to true should include new user's trash trashRootsAfter2 = dfs.getTrashRoots(true); assertEquals(trashRootsAfter.size() + 1, trashRootsAfter2.size()); // Create trash root inside the snapshottable directory for user0 Path testDirTrashUser0 = new Path(testDirTrash, ugi.getShortUserName()); dfs.mkdirs(testDirTrashUser0); Collection<FileStatus> trashRootsAfter3 = dfs.getTrashRoots(true); assertEquals(trashRootsAfter2.size() + 1, trashRootsAfter3.size()); // Cleanup dfs.delete(new Path(testDir, FileSystem.TRASH_PREFIX), true); dfs.disallowSnapshot(testDir); dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testGetTrashRootsOnSnapshottableDirWithEZ() throws IOException, NoSuchAlgorithmException { Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); // Set encryption zone config File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final 
// [chunk] Body of testGetTrashRootsOnSnapshottableDirWithEZ: when a directory is both
// an encryption zone and (later) snapshottable, getTrashRoots must report the same
// single trash root inside the dir before and after allowSnapshot — no duplicate.
// Trailing fragment opens testGetTrashRootOnSnapshottableDirInEZ (continues next).
// NOTE(review): the MiniDFSCluster here is built BEFORE the try, so a failure in
// provider.createKey/flush would skip the finally-shutdown; moving the build inside
// try-with-resources would be safer.
Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); // Create key for EZ final KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); provider.createKey("key", options); provider.flush(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/ssgtr/test2/"); dfs.mkdirs(testDir); dfs.createEncryptionZone(testDir, "key"); // Create trash inside test directory Path testDirTrash = new Path(testDir, FileSystem.TRASH_PREFIX); Path testDirTrashCurrUser = new Path(testDirTrash, UserGroupInformation.getCurrentUser().getShortUserName()); dfs.mkdirs(testDirTrashCurrUser); Collection<FileStatus> trashRoots = dfs.getTrashRoots(false); assertEquals(1, trashRoots.size()); FileStatus firstFileStatus = trashRoots.iterator().next(); String pathStr = firstFileStatus.getPath().toUri().getPath(); String testDirStr = testDir.toUri().getPath(); assertTrue(pathStr.startsWith(testDirStr)); dfs.allowSnapshot(testDir); Collection<FileStatus> trashRootsAfter = dfs.getTrashRoots(false); // getTrashRoots should give the same result assertEquals(trashRoots, trashRootsAfter); // Cleanup dfs.delete(new Path(testDir, FileSystem.TRASH_PREFIX), true); dfs.disallowSnapshot(testDir); dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testGetTrashRootOnSnapshottableDirInEZ() throws IOException, NoSuchAlgorithmException { Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); // Set EZ config File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final Path jksPath = new Path(tmpDir.toString(), "test.jks"); 
// [chunk] Body of testGetTrashRootOnSnapshottableDirInEZ: for a file in a subdir of
// an encryption zone, getTrashRoot first resolves to the EZ root's .Trash; once the
// subdir becomes snapshottable, it must resolve to <subdir>/.Trash/<user> exactly.
// Trailing fragment opens testGetTrashRootOnEZInSnapshottableDir (continues next).
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); // Create key for EZ final KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); provider.createKey("key", options); provider.flush(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/ssgtr/test3ez/"); dfs.mkdirs(testDir); dfs.createEncryptionZone(testDir, "key"); Path testSubD = new Path(testDir, "sssubdir"); Path file1Path = new Path(testSubD, "file1"); dfs.create(file1Path); final Path trBefore = dfs.getTrashRoot(file1Path); final String trBeforeStr = trBefore.toUri().getPath(); // The trash root should be directly under testDir final Path testDirTrash = new Path(testDir, FileSystem.TRASH_PREFIX); final String testDirTrashStr = testDirTrash.toUri().getPath(); assertTrue(trBeforeStr.startsWith(testDirTrashStr)); dfs.allowSnapshot(testSubD); final Path trAfter = dfs.getTrashRoot(file1Path); final String trAfterStr = trAfter.toUri().getPath(); // The trash is now located in the dir inside final Path testSubDirTrash = new Path(testSubD, FileSystem.TRASH_PREFIX); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); final Path testSubDirUserTrash = new Path(testSubDirTrash, ugi.getShortUserName()); final String testSubDirUserTrashStr = testSubDirUserTrash.toUri().getPath(); assertEquals(testSubDirUserTrashStr, trAfterStr); // Cleanup dfs.disallowSnapshot(testSubD); dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testGetTrashRootOnEZInSnapshottableDir() throws IOException, NoSuchAlgorithmException { Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); // Set EZ config File tmpDir = 
GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); // Create key for EZ final KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); provider.createKey("key", options); provider.flush(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/ssgtr/test3ss/"); dfs.mkdirs(testDir); dfs.allowSnapshot(testDir); Path testSubD = new Path(testDir, "ezsubdir"); dfs.mkdirs(testSubD); Path file1Path = new Path(testSubD, "file1"); dfs.create(file1Path); final Path trBefore = dfs.getTrashRoot(file1Path); final String trBeforeStr = trBefore.toUri().getPath(); // The trash root should be directly under testDir final Path testDirTrash = new Path(testDir, FileSystem.TRASH_PREFIX); final String testDirTrashStr = testDirTrash.toUri().getPath(); assertTrue(trBeforeStr.startsWith(testDirTrashStr)); // Need to remove the file inside the dir to establish EZ dfs.delete(file1Path, false); dfs.createEncryptionZone(testSubD, "key"); dfs.create(file1Path); final Path trAfter = dfs.getTrashRoot(file1Path); final String trAfterStr = trAfter.toUri().getPath(); // The trash is now located in the dir inside final Path testSubDirTrash = new Path(testSubD, FileSystem.TRASH_PREFIX); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); final Path testSubDirUserTrash = new Path(testSubDirTrash, ugi.getShortUserName()); final String testSubDirUserTrashStr = testSubDirUserTrash.toUri().getPath(); assertEquals(testSubDirUserTrashStr, trAfterStr); // Cleanup dfs.disallowSnapshot(testDir); dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void 
testDisallowSnapshotShouldThrowWhenTrashRootExists() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); Path testDir = new Path("/disallowss/test1/"); Path file0path = new Path(testDir, "file-0"); dfs.create(file0path); dfs.allowSnapshot(testDir); // Create trash root manually Path testDirTrashRoot = new Path(testDir, FileSystem.TRASH_PREFIX); Path dirInsideTrash = new Path(testDirTrashRoot, "user1"); dfs.mkdirs(dirInsideTrash); // Try disallowing snapshot, should throw LambdaTestUtils.intercept(IOException.class, () -> dfs.disallowSnapshot(testDir)); // Remove the trash root and try again, should pass this time dfs.delete(testDirTrashRoot, true); dfs.disallowSnapshot(testDir); // Cleanup dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testNameNodeCreateSnapshotTrashRootOnStartup() throws Exception { // Start NN with dfs.namenode.snapshot.trashroot.enabled=false Configuration conf = getTestConfiguration(); conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", false); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); final Path testDir = new Path("/disallowss/test2/"); final Path file0path = new Path(testDir, "file-0"); dfs.create(file0path).close(); dfs.allowSnapshot(testDir); // .Trash won't be created right now since snapshot trash is disabled final Path trashRoot = new Path(testDir, FileSystem.TRASH_PREFIX); assertFalse(dfs.exists(trashRoot)); // Set dfs.namenode.snapshot.trashroot.enabled=true conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); cluster.setNameNodeConf(0, conf); cluster.shutdown(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1); 
cluster.restartNameNode(0); dfs = cluster.getFileSystem(); assertTrue(cluster.getNameNode().isInSafeMode()); // Check .Trash existence, won't be created now assertFalse(dfs.exists(trashRoot)); // Start a datanode cluster.startDataNodes(conf, 1, true, null, null); // Wait long enough for safemode check to retire try { Thread.sleep(1000); } catch (InterruptedException ignored) {} // Check .Trash existence, should be created now assertTrue(dfs.exists(trashRoot)); // Check permission FileStatus trashRootStatus = dfs.getFileStatus(trashRoot); assertNotNull(trashRootStatus); assertEquals(TRASH_PERMISSION, trashRootStatus.getPermission()); // Cleanup dfs.delete(trashRoot, true); dfs.disallowSnapshot(testDir); dfs.delete(testDir, true); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testSingleRackFailureDuringPipelineSetupMinReplicationPossible() throws Exception { Configuration conf = getTestConfiguration(); conf.setClass( DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class); conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false); conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. MIN_REPLICATION, 2); // 3 racks & 3 nodes. 1 per rack try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) .racks(new String[] {"/rack1", "/rack2", "/rack3"}).build()) { cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); // kill one DN, so only 2 racks stays with active DN cluster.stopDataNode(0); // create a file with replication 3, for rack fault tolerant BPP, // it should allocate nodes in all 3 racks. 
DFSTestUtil.createFile(fs, new Path("/testFile"), 1024L, (short) 3, 1024L); } } @Test public void testSingleRackFailureDuringPipelineSetupMinReplicationImpossible() throws Exception { Configuration conf = getTestConfiguration(); conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class); conf.setBoolean(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false); conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.MIN_REPLICATION, 3); // 3 racks & 3 nodes. 1 per rack try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) .racks(new String[] {"/rack1", "/rack2", "/rack3"}).build()) { cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); // kill one DN, so only 2 racks stays with active DN cluster.stopDataNode(0); LambdaTestUtils.intercept(IOException.class, () -> DFSTestUtil.createFile(fs, new Path("/testFile"), 1024L, (short) 3, 1024L)); } } @Test public void testMultipleRackFailureDuringPipelineSetupMinReplicationPossible() throws Exception { Configuration conf = getTestConfiguration(); conf.setClass( DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class); conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false); conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. MIN_REPLICATION, 1); // 3 racks & 3 nodes. 1 per rack try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) .racks(new String[] {"/rack1", "/rack2", "/rack3"}).build()) { cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); // kill 2 DN, so only 1 racks stays with active DN cluster.stopDataNode(0); cluster.stopDataNode(1); // create a file with replication 3, for rack fault tolerant BPP, // it should allocate nodes in all 3 racks. 
DFSTestUtil.createFile(fs, new Path("/testFile"), 1024L, (short) 3, 1024L); } } @Test public void testMultipleRackFailureDuringPipelineSetupMinReplicationImpossible() throws Exception { Configuration conf = getTestConfiguration(); conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class); conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false); conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure. MIN_REPLICATION, 2); // 3 racks & 3 nodes. 1 per rack try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) .racks(new String[] {"/rack1", "/rack2", "/rack3"}).build()) { cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); // kill 2 DN, so only 1 rack stays with active DN cluster.stopDataNode(0); cluster.stopDataNode(1); LambdaTestUtils.intercept(IOException.class, () -> DFSTestUtil.createFile(fs, new Path("/testFile"), 1024L, (short) 3, 1024L)); } } @Test public void testAllRackFailureDuringPipelineSetup() throws Exception { Configuration conf = getTestConfiguration(); conf.setClass( DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class); conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false); // 3 racks & 3 nodes. 1 per rack try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) .racks(new String[] {"/rack1", "/rack2", "/rack3"}).build()) { cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); // shutdown all DNs cluster.shutdownDataNodes(); // create a file with replication 3, for rack fault tolerant BPP, // it should allocate nodes in all 3 rack but fail because no DNs are present. LambdaTestUtils.intercept(IOException.class, () -> DFSTestUtil.createFile(fs, new Path("/testFile"), 1024L, (short) 3, 1024L)); } } }
that
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
{ "start": 9925, "end": 10072 }
class ____ extends AsyncDispatcher { public static BlockingQueue<Event> eventQueue = new LinkedBlockingQueue<>(); public static
SpyDispatcher
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
{ "start": 2046, "end": 3159 }
class ____ extends HttpServletRequestWrapper { private Map<String, String[]> lowerCaseParams = new HashMap<>(); private CustomHttpServletRequestWrapper(HttpServletRequest request) { super(request); Map<String, String[]> originalParams = request.getParameterMap(); for (Map.Entry<String, String[]> entry : originalParams.entrySet()) { lowerCaseParams.put(entry.getKey().toLowerCase(), entry.getValue()); } } public String getParameter(String name) { String[] values = getParameterValues(name); if (values != null && values.length > 0) { return values[0]; } else { return null; } } @Override public Map<String, String[]> getParameterMap() { return Collections.unmodifiableMap(lowerCaseParams); } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(lowerCaseParams.keySet()); } @Override public String[] getParameterValues(String name) { return lowerCaseParams.get(name.toLowerCase()); } } }
CustomHttpServletRequestWrapper
java
apache__flink
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/connector/upserttest/sink/ImmutableByteArrayWrapper.java
{ "start": 1308, "end": 2102 }
class ____ { @VisibleForTesting final byte[] bytes; ImmutableByteArrayWrapper(byte[] bytes) { checkNotNull(bytes); this.bytes = bytes.clone(); } /** * Returns a reference-free copy of the underlying byte[]. * * @return the copied byte[] */ byte[] array() { return bytes.clone(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ImmutableByteArrayWrapper that = (ImmutableByteArrayWrapper) o; return Arrays.equals(bytes, that.bytes); } @Override public int hashCode() { return Arrays.hashCode(bytes); } }
ImmutableByteArrayWrapper
java
apache__camel
core/camel-api/src/main/java/org/apache/camel/support/jsse/SSLContextClientParameters.java
{ "start": 3224, "end": 3424 }
class ____ additional shared configuration options beyond // cipher suites and protocols, this method needs to address that. return Collections.emptyList(); } /** * This
gets
java
google__error-prone
core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java
{ "start": 37135, "end": 38500 }
class ____ extends BugChecker implements LiteralTreeMatcher, VariableTreeMatcher { @Override public Description matchLiteral(LiteralTree tree, VisitorState state) { if (tree.getValue().equals(42)) { Fix potentialFix = SuggestedFixes.addSuppressWarnings(state, "SuppressMe"); if (potentialFix == null) { return describeMatch(tree); } return describeMatch(tree, potentialFix); } return Description.NO_MATCH; } @Override public Description matchVariable(VariableTree tree, VisitorState state) { // If it's a lambda param, then flag. LambdaExpressionTree enclosingMethod = ASTHelpers.findEnclosingNode(state.getPath(), LambdaExpressionTree.class); if (enclosingMethod != null && enclosingMethod.getParameters().contains(tree)) { return describeMatch(tree, SuggestedFixes.addSuppressWarnings(state, "AParameter")); } return Description.NO_MATCH; } } @Test @org.junit.Ignore("There appears to be an issue parsing lambda comments") public void suppressWarningsFix() { BugCheckerRefactoringTestHelper refactorTestHelper = BugCheckerRefactoringTestHelper.newInstance(SuppressMe.class, getClass()); refactorTestHelper .addInputLines( "in/Test.java", """ public
SuppressMe
java
apache__camel
core/camel-management/src/test/java/org/apache/camel/management/LoadTimerTest.java
{ "start": 1473, "end": 2433 }
class ____ extends ContextTestSupport { private static final int SAMPLES = 2; @Override public boolean isUseRouteBuilder() { return false; } @Test public void testTimer() throws IOException { TimerListenerManager myTimer = new TimerListenerManager(); myTimer.setCamelContext(context); myTimer.start(); TestLoadAware test = new TestLoadAware(); myTimer.addTimerListener(test); try { await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> { assertTrue(test.counter.intValue() >= SAMPLES); assertFalse(Double.isNaN(test.load.getLoad1())); assertTrue(test.load.getLoad1() > 0.0d); assertTrue(test.load.getLoad1() < SAMPLES); }); } finally { myTimer.removeTimerListener(test); } myTimer.stop(); myTimer.close(); } private static
LoadTimerTest
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/generic/GenericFixed.java
{ "start": 872, "end": 968 }
interface ____ extends GenericContainer { /** Return the data. */ byte[] bytes(); }
GenericFixed
java
spring-projects__spring-framework
spring-orm/src/test/java/org/springframework/orm/jpa/EntityManagerRuntimeHintsTests.java
{ "start": 1554, "end": 2855 }
class ____ { private final RuntimeHints hints = new RuntimeHints(); @BeforeEach void setup() { AotServices.factories().load(RuntimeHintsRegistrar.class) .forEach(registrar -> registrar.registerHints(this.hints, ClassUtils.getDefaultClassLoader())); } @Test void entityManagerFactoryInfoHasHibernateHints() { assertThat(RuntimeHintsPredicates.proxies().forInterfaces(SessionFactory.class, EntityManagerFactoryInfo.class)) .accepts(this.hints); } @Test void entityManagerProxyHasHibernateHints() { assertThat(RuntimeHintsPredicates.proxies().forInterfaces(Session.class, EntityManagerProxy.class)) .accepts(this.hints); } @Test void entityManagerFactoryHasReflectionHints() { assertThat(RuntimeHintsPredicates.reflection().onMethodInvocation(EntityManagerFactory.class, "getCriteriaBuilder")).accepts(this.hints); assertThat(RuntimeHintsPredicates.reflection().onMethodInvocation(EntityManagerFactory.class, "getMetamodel")).accepts(this.hints); } @Test void sqmQueryHints() { assertThat(RuntimeHintsPredicates.proxies().forInterfaces( SqmQueryImplementor.class, InterpretationsKeySource.class, DomainQueryExecutionContext.class, SelectionQuery.class, CommonQueryContract.class)).accepts(this.hints); } }
EntityManagerRuntimeHintsTests
java
spring-projects__spring-framework
spring-tx/src/main/java/org/springframework/transaction/reactive/TransactionalOperator.java
{ "start": 1738, "end": 2227 }
class ____ object. * * <p><strong>Note:</strong> Transactional Publishers should avoid Subscription * cancellation. See the * <a href="https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#tx-prog-operator-cancel">Cancel Signals</a> * section of the Spring Framework reference for more details. * * @author Mark Paluch * @author Juergen Hoeller * @author Enric Sala * @since 5.2 * @see #execute * @see ReactiveTransactionManager */ public
callback
java
spring-projects__spring-boot
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/task/TaskExecutionProperties.java
{ "start": 6468, "end": 6673 }
enum ____ { /** * Create the task executor if no user-defined executor is present. */ AUTO, /** * Create the task executor even if a user-defined executor is present. */ FORCE } }
Mode
java
apache__flink
flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/assigners/LocalityAwareSplitAssignerTest.java
{ "start": 1268, "end": 11686 }
class ____ { private static final Path TEST_PATH = Path.fromLocalFile(new File(System.getProperty("java.io.tmpdir"))); // ------------------------------------------------------------------------ @Test void testAssignmentWithNullHost() { final int numSplits = 50; final String[][] hosts = new String[][] {new String[] {"localhost"}, new String[0]}; // load some splits final Set<FileSourceSplit> splits = new HashSet<>(); for (int i = 0; i < numSplits; i++) { splits.add(createSplit(i, hosts[i % hosts.length])); } // get all available splits final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); Optional<FileSourceSplit> is; while ((is = ia.getNext(null)).isPresent()) { assertThat(splits.remove(is.get())).isTrue(); } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("")).isNotPresent(); assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(numSplits); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(0); } @Test void testAssignmentAllForSameHost() { final int numSplits = 50; // load some splits final Set<FileSourceSplit> splits = new HashSet<>(); for (int i = 0; i < numSplits; i++) { splits.add(createSplit(i, "testhost")); } // get all available splits LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); Optional<FileSourceSplit> is; while ((is = ia.getNext("testhost")).isPresent()) { assertThat(splits.remove(is.get())).isTrue(); } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("")).isNotPresent(); assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(0); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(numSplits); } @Test void testAssignmentAllForRemoteHost() { final String[] hosts = {"host1", "host1", "host1", "host2", "host2", "host3"}; final int numSplits = 10 * hosts.length; // load some splits final Set<FileSourceSplit> splits = new HashSet<>(); for (int i = 0; i < numSplits; i++) { splits.add(createSplit(i, hosts[i % hosts.length])); } // get all available 
splits final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); Optional<FileSourceSplit> is; while ((is = ia.getNext("testhost")).isPresent()) { assertThat(splits.remove(is.get())).isTrue(); } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("anotherHost")).isNotPresent(); assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(numSplits); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(0); } @Test void testAssignmentSomeForRemoteHost() { // host1 reads all local // host2 reads 10 local and 10 remote // host3 reads all remote final String[] hosts = {"host1", "host2", "host3"}; final int numLocalHost1Splits = 20; final int numLocalHost2Splits = 10; final int numRemoteSplits = 30; final int numLocalSplits = numLocalHost1Splits + numLocalHost2Splits; // load local splits int splitCnt = 0; final Set<FileSourceSplit> splits = new HashSet<>(); // host1 splits for (int i = 0; i < numLocalHost1Splits; i++) { splits.add(createSplit(splitCnt++, "host1")); } // host2 splits for (int i = 0; i < numLocalHost2Splits; i++) { splits.add(createSplit(splitCnt++, "host2")); } // load remote splits for (int i = 0; i < numRemoteSplits; i++) { splits.add(createSplit(splitCnt++, "remoteHost")); } // get all available splits final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); Optional<FileSourceSplit> is; int i = 0; while ((is = ia.getNext(hosts[i++ % hosts.length])).isPresent()) { assertThat(splits.remove(is.get())).isTrue(); } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("anotherHost")).isNotPresent(); assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(numRemoteSplits); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(numLocalSplits); } @SuppressWarnings("UnnecessaryLocalVariable") @Test void testAssignmentMultiLocalHost() { final String[] localHosts = {"local1", "local2", "local3"}; final String[] remoteHosts = {"remote1", "remote2", "remote3"}; final String[] 
requestingHosts = {"local3", "local2", "local1", "other"}; final int numThreeLocalSplits = 10; final int numTwoLocalSplits = 10; final int numOneLocalSplits = 10; final int numLocalSplits = 30; final int numRemoteSplits = 10; final int numSplits = 40; final String[] threeLocalHosts = localHosts; final String[] twoLocalHosts = {localHosts[0], localHosts[1], remoteHosts[0]}; final String[] oneLocalHost = {localHosts[0], remoteHosts[0], remoteHosts[1]}; final String[] noLocalHost = remoteHosts; int splitCnt = 0; final Set<FileSourceSplit> splits = new HashSet<>(); // add splits with three local hosts for (int i = 0; i < numThreeLocalSplits; i++) { splits.add(createSplit(splitCnt++, threeLocalHosts)); } // add splits with two local hosts for (int i = 0; i < numTwoLocalSplits; i++) { splits.add(createSplit(splitCnt++, twoLocalHosts)); } // add splits with two local hosts for (int i = 0; i < numOneLocalSplits; i++) { splits.add(createSplit(splitCnt++, oneLocalHost)); } // add splits with two local hosts for (int i = 0; i < numRemoteSplits; i++) { splits.add(createSplit(splitCnt++, noLocalHost)); } // get all available splits final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); for (int i = 0; i < numSplits; i++) { final String host = requestingHosts[i % requestingHosts.length]; final Optional<FileSourceSplit> ois = ia.getNext(host); assertThat(ois).isPresent(); final FileSourceSplit is = ois.get(); assertThat(splits.remove(is)).isTrue(); // check priority of split if (host.equals(localHosts[0])) { assertThat(is.hostnames()).isEqualTo(oneLocalHost); } else if (host.equals(localHosts[1])) { assertThat(is.hostnames()).isEqualTo(twoLocalHosts); } else if (host.equals(localHosts[2])) { assertThat(is.hostnames()).isEqualTo(threeLocalHosts); } else { assertThat(is.hostnames()).isEqualTo(noLocalHost); } } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("anotherHost")).isNotPresent(); 
assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(numRemoteSplits); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(numLocalSplits); } @Test void testAssignmentMixedLocalHost() { final String[] hosts = {"host1", "host1", "host1", "host2", "host2", "host3"}; final int numSplits = 10 * hosts.length; // load some splits Set<FileSourceSplit> splits = new HashSet<>(); for (int i = 0; i < numSplits; i++) { splits.add(createSplit(i, hosts[i % hosts.length])); } // get all available splits LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); Optional<FileSourceSplit> is; int i = 0; while ((is = ia.getNext(hosts[i++ % hosts.length])).isPresent()) { assertThat(splits.remove(is.get())).isTrue(); } // check we had all assertThat(splits).isEmpty(); assertThat(ia.getNext("anotherHost")).isNotPresent(); assertThat(ia.getNumberOfRemoteAssignments()).isEqualTo(0); assertThat(ia.getNumberOfLocalAssignments()).isEqualTo(numSplits); } @Test void testAssignmentOfManySplitsRandomly() { final long seed = Calendar.getInstance().getTimeInMillis(); final int numSplits = 1000; final String[] splitHosts = new String[256]; final String[] requestingHosts = new String[256]; final Random rand = new Random(seed); for (int i = 0; i < splitHosts.length; i++) { splitHosts[i] = "localHost" + i; } for (int i = 0; i < requestingHosts.length; i++) { if (i % 2 == 0) { requestingHosts[i] = "localHost" + i; } else { requestingHosts[i] = "remoteHost" + i; } } String[] stringArray = {}; Set<String> hosts = new HashSet<>(); Set<FileSourceSplit> splits = new HashSet<>(); for (int i = 0; i < numSplits; i++) { while (hosts.size() < 3) { hosts.add(splitHosts[rand.nextInt(splitHosts.length)]); } splits.add(createSplit(i, hosts.toArray(stringArray))); hosts.clear(); } final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits); for (int i = 0; i < numSplits; i++) { final Optional<FileSourceSplit> split = 
ia.getNext(requestingHosts[rand.nextInt(requestingHosts.length)]); assertThat(split).isPresent(); assertThat(splits.remove(split.get())).isTrue(); } assertThat(splits).isEmpty(); assertThat(ia.getNext("testHost")).isNotPresent(); } // ------------------------------------------------------------------------ // utilities // ------------------------------------------------------------------------ private static FileSourceSplit createSplit(int id, String... hosts) { return new FileSourceSplit(String.valueOf(id), TEST_PATH, 0, 1024, 0, 1024, hosts); } }
LocalityAwareSplitAssignerTest
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/JSONObjectTest7.java
{ "start": 146, "end": 428 }
class ____ extends TestCase { public void test() throws Exception { JSONObject jsonObject = JSON.parseObject("{\"test\":null,\"a\":\"cc\"}"); assertEquals(2, jsonObject.entrySet().size()); assertTrue(jsonObject.containsKey("test")); } }
JSONObjectTest7
java
spring-projects__spring-framework
spring-context/src/test/java/org/springframework/instrument/classloading/ReflectiveLoadTimeWeaverTests.java
{ "start": 3498, "end": 3907 }
class ____ extends JustAddTransformerClassLoader { private int numTimesGetThrowawayClassLoaderCalled = 0; @Override public int getNumTimesGetThrowawayClassLoaderCalled() { return this.numTimesGetThrowawayClassLoaderCalled; } public ClassLoader getThrowawayClassLoader() { ++this.numTimesGetThrowawayClassLoaderCalled; return getClass().getClassLoader(); } } }
TotallyCompliantClassLoader
java
spring-projects__spring-framework
spring-web/src/main/java/org/springframework/web/service/annotation/HttpExchangeBeanRegistrationAotProcessor.java
{ "start": 1764, "end": 2589 }
class ____ implements BeanRegistrationAotProcessor { @Override public @Nullable BeanRegistrationAotContribution processAheadOfTime(RegisteredBean registeredBean) { Class<?> beanClass = registeredBean.getBeanClass(); List<Class<?>> exchangeInterfaces = new ArrayList<>(); Search search = MergedAnnotations.search(TYPE_HIERARCHY); for (Class<?> interfaceClass : ClassUtils.getAllInterfacesForClass(beanClass)) { ReflectionUtils.doWithMethods(interfaceClass, method -> { if (!exchangeInterfaces.contains(interfaceClass) && search.from(method).isPresent(HttpExchange.class)) { exchangeInterfaces.add(interfaceClass); } }); } if (!exchangeInterfaces.isEmpty()) { return new AotContribution(exchangeInterfaces); } return null; } private static
HttpExchangeBeanRegistrationAotProcessor
java
netty__netty
example/src/main/java/io/netty/example/http2/tiles/Html.java
{ "start": 829, "end": 3461 }
class ____ { public static final String IP = System.getProperty("ip", "127.0.0.1"); public static final byte[] FOOTER = "</body></html>".getBytes(UTF_8); public static final byte[] HEADER = ("<!DOCTYPE html><html><head lang=\"en\"><title>Netty HTTP/2 Example</title>" + "<style>body {background:#DDD;} div#netty { line-height:0;}</style>" + "<link rel=\"shortcut icon\" href=\"about:blank\">" + "<meta charset=\"UTF-8\"></head><body>A grid of 200 tiled images is shown below. Compare:" + "<p>[<a href='https://" + url(Http2Server.PORT) + "?latency=0'>HTTP/2, 0 latency</a>] [<a href='http://" + url(HttpServer.PORT) + "?latency=0'>HTTP/1, 0 latency</a>]<br/>" + "[<a href='https://" + url(Http2Server.PORT) + "?latency=30'>HTTP/2, 30ms latency</a>] [<a href='http://" + url(HttpServer.PORT) + "?latency=30'>HTTP/1, 30ms latency</a>]<br/>" + "[<a href='https://" + url(Http2Server.PORT) + "?latency=200'>HTTP/2, 200ms latency</a>] [<a href='http://" + url(HttpServer.PORT) + "?latency=200'>HTTP/1, 200ms latency</a>]<br/>" + "[<a href='https://" + url(Http2Server.PORT) + "?latency=1000'>HTTP/2, 1s latency</a>] [<a href='http://" + url(HttpServer.PORT) + "?latency=1000'>HTTP/1, " + "1s latency</a>]<br/>").getBytes(UTF_8); private static final int IMAGES_X_AXIS = 20; private static final int IMAGES_Y_AXIS = 10; private Html() { } private static String url(int port) { return IP + ":" + port + "/http2"; } public static byte[] body(int latency) { int r = Math.abs(new Random().nextInt()); // The string to be built contains 13192 fixed characters plus the variable latency and random cache-bust. 
int numberOfCharacters = 13192 + stringLength(latency) + stringLength(r); StringBuilder sb = new StringBuilder(numberOfCharacters).append("<div id=\"netty\">"); for (int y = 0; y < IMAGES_Y_AXIS; y++) { for (int x = 0; x < IMAGES_X_AXIS; x++) { sb.append("<img width=30 height=29 src='/http2?x=") .append(x) .append("&y=").append(y) .append("&cachebust=").append(r) .append("&latency=").append(latency) .append("'>"); } sb.append("<br/>\r\n"); } sb.append("</div>"); return sb.toString().getBytes(UTF_8); } private static int stringLength(int value) { return Integer.toString(value).length() * IMAGES_X_AXIS * IMAGES_Y_AXIS; } }
Html
java
google__error-prone
core/src/main/java/com/google/errorprone/bugpatterns/JdkObsolete.java
{ "start": 3091, "end": 3221 }
class ____ extends BugChecker implements NewClassTreeMatcher, ClassTreeMatcher, MemberReferenceTreeMatcher { static
JdkObsolete
java
apache__camel
components/camel-platform-http-main/src/test/java/org/apache/camel/component/platform/http/main/authentication/JWTAuthenticationMainHttpServerTest.java
{ "start": 3274, "end": 3630 }
class ____ extends RouteBuilder { @Override public void configure() throws Exception { from("platform-http:/main-http-test") .log("Received request with headers: ${headers}\nWith body: ${body}") .setBody(simple("main-http-auth-jwt-test-response")); } } }
PlatformHttpRouteBuilder
java
google__guava
guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java
{ "start": 20613, "end": 20933 }
class ____ extends PassObject { @Override public void twoArg(String s, Integer i) { // Fail: missing NPE for s i.intValue(); } } public void testFailTwoArgsFirstArgDoesntThrowNpe() { shouldFail(new FailTwoArgsFirstArgDoesntThrowNpe()); } private static
FailTwoArgsFirstArgDoesntThrowNpe
java
apache__rocketmq
common/src/test/java/org/apache/rocketmq/common/attribute/CQTypeTest.java
{ "start": 926, "end": 1619 }
class ____ { @Test public void testValues() { CQType[] values = CQType.values(); assertEquals(3, values.length); assertEquals(CQType.SimpleCQ, values[0]); assertEquals(CQType.BatchCQ, values[1]); assertEquals(CQType.RocksDBCQ, values[2]); } @Test public void testValueOf() { assertEquals(CQType.SimpleCQ, CQType.valueOf("SimpleCQ")); assertEquals(CQType.BatchCQ, CQType.valueOf("BatchCQ")); assertEquals(CQType.RocksDBCQ, CQType.valueOf("RocksDBCQ")); } @Test(expected = IllegalArgumentException.class) public void testValueOf_InvalidName() { CQType.valueOf("InvalidCQ"); } }
CQTypeTest
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/presto/PrestoAlterTableRename_0.java
{ "start": 308, "end": 1043 }
class ____ { @Test public void test_alter_schema() { String sql = "ALTER SCHEMA name RENAME TO new_name"; SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.presto); SQLStatement stmt = parser.parseStatement(); assertEquals("ALTER SCHEMA name RENAME TO new_name", stmt.toString()); } @Test public void test_alter_schema2() { String sql = "ALTER SCHEMA db.name RENAME TO new_name"; SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.presto); SQLStatement stmt = parser.parseStatement(); assertEquals("ALTER SCHEMA db.name RENAME TO new_name", stmt.toString()); } }
PrestoAlterTableRename_0
java
grpc__grpc-java
netty/src/main/java/io/grpc/netty/InternalProtocolNegotiators.java
{ "start": 6279, "end": 6712 }
class ____ extends ProtocolNegotiators.ProtocolNegotiationHandler { protected ProtocolNegotiationHandler(ChannelHandler next, String negotiatorName, ChannelLogger negotiationLogger) { super(next, negotiatorName, negotiationLogger); } protected ProtocolNegotiationHandler(ChannelHandler next, ChannelLogger negotiationLogger) { super(next, negotiationLogger); } } }
ProtocolNegotiationHandler
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/reflection/OtherLogListener.java
{ "start": 307, "end": 726 }
class ____ { private static final Logger log = Logger.getLogger( OtherLogListener.class ); @PrePersist @PostPersist public void log(Object entity) { log.debug("Logging entity " + entity.getClass().getName() + " with hashCode: " + entity.hashCode()); } public void noLog(Object entity) { log.debug("NoLogging entity " + entity.getClass().getName() + " with hashCode: " + entity.hashCode()); } }
OtherLogListener
java
google__truth
extensions/proto/src/main/java/com/google/common/truth/extensions/proto/DiffResult.java
{ "start": 16655, "end": 17274 }
class ____ { abstract Builder setFieldDescriptor(FieldDescriptor fieldDescriptor); abstract Builder setActual(Iterable<?> actual); abstract Builder setExpected(Iterable<?> expected); @ForOverride abstract ImmutableList.Builder<PairResult> pairResultsBuilder(); @CanIgnoreReturnValue final Builder addPairResult(PairResult pairResult) { pairResultsBuilder().add(pairResult); return this; } abstract RepeatedField build(); } } /** Structural summary of the difference between two unknown field sets. */ @AutoValue abstract static
Builder
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/json/JsonPathValueAssert.java
{ "start": 979, "end": 1442 }
class ____ extends AbstractJsonValueAssert<JsonPathValueAssert> { private final String expression; JsonPathValueAssert(@Nullable Object actual, String expression, @Nullable JsonConverterDelegate converter) { super(actual, JsonPathValueAssert.class, converter); this.expression = expression; } @Override protected String getExpectedErrorMessagePrefix() { return "Expected value at JSON path \"%s\":".formatted(this.expression); } }
JsonPathValueAssert
java
netty__netty
codec-http3/src/test/java/io/netty/handler/codec/http3/Http3FrameToHttpObjectCodecTest.java
{ "start": 29180, "end": 45890 }
class ____ implements ArgumentsProvider { @Override public Stream<? extends Arguments> provideArguments(ExtensionContext extensionContext) { List<Arguments> arguments = new ArrayList<>(); for (boolean headers : new boolean[]{false, true}) { for (boolean last : new boolean[]{false, true}) { for (boolean nonEmptyContent : new boolean[]{false, true}) { for (boolean hasTrailers : new boolean[]{false, true}) { for (boolean voidPromise : new boolean[]{false, true}) { // this test goes through all the branches of Http3FrameToHttpObjectCodec // and ensures right functionality arguments.add(Arguments.of(headers, last, nonEmptyContent, hasTrailers, voidPromise)); } } } } } return arguments.stream(); } } @ParameterizedTest(name = "headers: {0}, last: {1}, nonEmptyContent: {2}, hasTrailers: {3}, voidPromise: {4}") @ArgumentsSource(value = EncodeCombinationsArgumentsProvider.class) public void testEncodeCombination( boolean headers, boolean last, boolean nonEmptyContent, boolean hasTrailers, boolean voidPromise ) { ByteBuf content = nonEmptyContent ? 
Unpooled.wrappedBuffer(new byte[1]) : Unpooled.EMPTY_BUFFER; HttpHeaders trailers = new DefaultHttpHeaders(); if (hasTrailers) { trailers.add("foo", "bar"); } HttpObject msg; if (headers) { if (last) { msg = new DefaultFullHttpRequest( HttpVersion.HTTP_1_1, HttpMethod.POST, "/foo", content, new DefaultHttpHeaders(), trailers); } else { if (hasTrailers || nonEmptyContent) { // not supported by the netty HTTP/1 model content.release(); return; } msg = new DefaultHttpRequest( HttpVersion.HTTP_1_1, HttpMethod.POST, "/foo", new DefaultHttpHeaders()); } } else { if (last) { msg = new DefaultLastHttpContent(content, trailers); } else { if (hasTrailers) { // makes no sense content.release(); return; } msg = new DefaultHttpContent(content); } } List<ChannelPromise> framePromises = new ArrayList<>(); EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel( new ChannelOutboundHandlerAdapter() { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { framePromises.add(promise); ctx.write(msg, ctx.voidPromise()); } }, new Http3FrameToHttpObjectCodec(false) ); ChannelFuture fullPromise = ch.writeOneOutbound(msg, voidPromise ? 
ch.voidPromise() : ch.newPromise()); ch.flushOutbound(); if (headers) { Http3HeadersFrame headersFrame = ch.readOutbound(); assertThat(headersFrame.headers().scheme().toString(), is("https")); assertThat(headersFrame.headers().method().toString(), is("POST")); assertThat(headersFrame.headers().path().toString(), is("/foo")); } if (nonEmptyContent) { Http3DataFrame dataFrame = ch.readOutbound(); assertThat(dataFrame.content().readableBytes(), is(1)); dataFrame.release(); } if (hasTrailers) { Http3HeadersFrame trailersFrame = ch.readOutbound(); assertThat(trailersFrame.headers().get("foo"), is("bar")); } else if (!nonEmptyContent && !headers) { Http3DataFrame dataFrame = ch.readOutbound(); assertThat(dataFrame.content().readableBytes(), is(0)); dataFrame.release(); } if (!voidPromise) { assertFalse(fullPromise.isDone()); } assertFalse(ch.isOutputShutdown()); for (ChannelPromise framePromise : framePromises) { framePromise.trySuccess(); } if (last) { assertTrue(ch.isOutputShutdown()); } else { assertFalse(ch.isOutputShutdown()); } if (!voidPromise) { assertTrue(fullPromise.isDone()); } assertFalse(ch.finish()); } @Test public void decode100ContinueHttp2HeadersAsFullHttpResponse() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); Http3Headers headers = new DefaultHttp3Headers(); headers.scheme(HttpScheme.HTTP.name()); headers.status(HttpResponseStatus.CONTINUE.codeAsText()); assertTrue(ch.writeInbound(new DefaultHttp3HeadersFrame(headers))); final FullHttpResponse response = ch.readInbound(); try { assertThat(response.status(), is(HttpResponseStatus.CONTINUE)); assertThat(response.protocolVersion(), is(HttpVersion.HTTP_1_1)); } finally { response.release(); } assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testDecodeResponseHeaders() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); Http3Headers headers = new 
DefaultHttp3Headers(); headers.scheme(HttpScheme.HTTP.name()); headers.status(HttpResponseStatus.OK.codeAsText()); assertTrue(ch.writeInbound(new DefaultHttp3HeadersFrame(headers))); HttpResponse response = ch.readInbound(); assertThat(response.status(), is(HttpResponseStatus.OK)); assertThat(response.protocolVersion(), is(HttpVersion.HTTP_1_1)); assertFalse(response instanceof FullHttpResponse); assertTrue(HttpUtil.isTransferEncodingChunked(response)); assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testDecodeResponseHeadersWithContentLength() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); Http3Headers headers = new DefaultHttp3Headers(); headers.scheme(HttpScheme.HTTP.name()); headers.status(HttpResponseStatus.OK.codeAsText()); headers.setInt("content-length", 0); assertTrue(ch.writeInbound(new DefaultHttp3HeadersFrame(headers))); HttpResponse response = ch.readInbound(); assertThat(response.status(), is(HttpResponseStatus.OK)); assertThat(response.protocolVersion(), is(HttpVersion.HTTP_1_1)); assertFalse(response instanceof FullHttpResponse); assertFalse(HttpUtil.isTransferEncodingChunked(response)); assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testDecodeResponseTrailersAsClient() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); Http3Headers headers = new DefaultHttp3Headers(); headers.set("key", "value"); assertTrue(ch.writeInboundWithFin(new DefaultHttp3HeadersFrame(headers))); LastHttpContent trailers = ch.readInbound(); try { assertThat(trailers.content().readableBytes(), is(0)); assertThat(trailers.trailingHeaders().get("key"), is("value")); assertFalse(trailers instanceof FullHttpRequest); } finally { trailers.release(); } assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testDecodeDataAsClient() { 
EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); ByteBuf hello = Unpooled.copiedBuffer("hello world", CharsetUtil.UTF_8); assertTrue(ch.writeInbound(new DefaultHttp3DataFrame(hello))); HttpContent content = ch.readInbound(); try { assertThat(content.content().toString(CharsetUtil.UTF_8), is("hello world")); assertFalse(content instanceof LastHttpContent); } finally { content.release(); } assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testDecodeEndDataAsClient() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); ByteBuf hello = Unpooled.copiedBuffer("hello world", CharsetUtil.UTF_8); assertTrue(ch.writeInboundWithFin(new DefaultHttp3DataFrame(hello))); HttpContent content = ch.readInbound(); try { assertThat(content.content().toString(CharsetUtil.UTF_8), is("hello world")); } finally { content.release(); } LastHttpContent last = ch.readInbound(); try { assertFalse(last.content().isReadable()); assertTrue(last.trailingHeaders().isEmpty()); } finally { last.release(); } assertThat(ch.readInbound(), is(nullValue())); assertFalse(ch.finish()); } @Test public void testHostTranslated() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/hello/world"); request.headers().add(HttpHeaderNames.HOST, "example.com"); assertTrue(ch.writeOutbound(request)); Http3HeadersFrame headersFrame = ch.readOutbound(); Http3Headers headers = headersFrame.headers(); assertThat(headers.scheme().toString(), is("https")); assertThat(headers.authority().toString(), is("example.com")); assertTrue(ch.isOutputShutdown()); assertFalse(ch.finish()); } @Test public void multipleFramesInFin() throws Exception { EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory()); try { Bootstrap 
bootstrap = new Bootstrap() .channel(NioDatagramChannel.class) .handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { // initialized below } }) .group(group); SelfSignedCertificate cert = new SelfSignedCertificate(); Channel server = bootstrap.bind("127.0.0.1", 0).sync().channel(); server.pipeline().addLast(Http3.newQuicServerCodecBuilder() .initialMaxData(10000000) .initialMaxStreamDataBidirectionalLocal(1000000) .initialMaxStreamDataBidirectionalRemote(1000000) .initialMaxStreamsBidirectional(100) .sslContext(QuicSslContextBuilder.forServer(cert.key(), null, cert.cert()) .applicationProtocols(Http3.supportedApplicationProtocols()).build()) .tokenHandler(InsecureQuicTokenHandler.INSTANCE) .handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast(new Http3ServerConnectionHandler(new ChannelInboundHandlerAdapter() { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof Http3HeadersFrame) { DefaultHttp3HeadersFrame responseHeaders = new DefaultHttp3HeadersFrame(); responseHeaders.headers().status(HttpResponseStatus.OK.codeAsText()); ctx.write(responseHeaders, ctx.voidPromise()); ctx.write(new DefaultHttp3DataFrame(ByteBufUtil.encodeString( ctx.alloc(), CharBuffer.wrap("foo"), CharsetUtil.UTF_8)), ctx.voidPromise()); // send a fin, this also flushes ((DuplexChannel) ctx.channel()).shutdownOutput(); } else { super.channelRead(ctx, msg); } } })); } }) .build()); Channel client = bootstrap.bind("127.0.0.1", 0).sync().channel(); client.config().setAutoRead(true); client.pipeline().addLast(Http3.newQuicClientCodecBuilder() .initialMaxData(10000000) .initialMaxStreamDataBidirectionalLocal(1000000) .sslContext(QuicSslContextBuilder.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE) .applicationProtocols(Http3.supportedApplicationProtocols()) .build()) .build()); QuicChannel 
quicChannel = QuicChannel.newBootstrap(client) .handler(new ChannelInitializer<QuicChannel>() { @Override protected void initChannel(QuicChannel ch) throws Exception { ch.pipeline().addLast(new Http3ClientConnectionHandler()); } }) .remoteAddress(server.localAddress()) .localAddress(client.localAddress()) .connect().get(); BlockingQueue<Object> received = new LinkedBlockingQueue<>(); QuicStreamChannel stream = Http3.newRequestStream(quicChannel, new Http3RequestStreamInitializer() { @Override protected void initRequestStream(QuicStreamChannel ch) { ch.pipeline() .addLast(new Http3FrameToHttpObjectCodec(false)) .addLast(new ChannelInboundHandlerAdapter() { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { received.put(msg); } }); } }).get(); DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); request.headers().add(HttpHeaderNames.HOST, "localhost"); stream.writeAndFlush(request); HttpResponse respHeaders = (HttpResponse) received.poll(20, TimeUnit.SECONDS); assertThat(respHeaders, notNullValue()); assertThat(respHeaders.status(), is(HttpResponseStatus.OK)); assertThat(respHeaders, not(instanceOf(LastHttpContent.class))); HttpContent respBody = (HttpContent) received.poll(20, TimeUnit.SECONDS); assertThat(respBody, notNullValue()); assertThat(respBody.content().toString(CharsetUtil.UTF_8), is("foo")); respBody.release(); LastHttpContent last = (LastHttpContent) received.poll(20, TimeUnit.SECONDS); assertThat(last, notNullValue()); last.release(); } finally { group.shutdownGracefully(); } } @Test public void testUnsupportedIncludeSomeDetails() { EmbeddedQuicStreamChannel ch = new EmbeddedQuicStreamChannel(new Http3FrameToHttpObjectCodec(false)); UnsupportedMessageTypeException ex = assertThrows( UnsupportedMessageTypeException.class, () -> ch.writeOutbound("unsupported")); assertNotNull(ex.getMessage()); assertFalse(ch.finish()); } }
EncodeCombinationsArgumentsProvider
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/records/RecordTypeInfo3342Test.java
{ "start": 1854, "end": 2072 }
class ____ extends AbstractMember249 { final String val; @JsonCreator public StringMember(@JsonProperty("val") String val) { this.val = val; } } static final
StringMember
java
spring-projects__spring-framework
spring-tx/src/main/java/org/springframework/transaction/TransactionDefinition.java
{ "start": 12804, "end": 13542 }
class ____ + "." + method name} (by default). * @return the name of this transaction ({@code null} by default} * @see org.springframework.transaction.interceptor.TransactionAspectSupport * @see org.springframework.transaction.support.TransactionSynchronizationManager#getCurrentTransactionName() */ default @Nullable String getName() { return null; } // Static builder methods /** * Return an unmodifiable {@code TransactionDefinition} with defaults. * <p>For customization purposes, use the modifiable * {@link org.springframework.transaction.support.DefaultTransactionDefinition} * instead. * @since 5.2 */ static TransactionDefinition withDefaults() { return StaticTransactionDefinition.INSTANCE; } }
name
java
quarkusio__quarkus
extensions/funqy/funqy-http/deployment/src/main/java/io/quarkus/funqy/deployment/bindings/http/FunqyHttpBuildStep.java
{ "start": 1334, "end": 4201 }
class ____ { public static final String FUNQY_HTTP_FEATURE = "funqy-http"; @BuildStep public void markObjectMapper(BuildProducer<UnremovableBeanBuildItem> unremovable) { unremovable.produce(new UnremovableBeanBuildItem( new UnremovableBeanBuildItem.BeanClassNameExclusion(ObjectMapper.class.getName()))); unremovable.produce(new UnremovableBeanBuildItem( new UnremovableBeanBuildItem.BeanClassNameExclusion(ObjectMapperProducer.class.getName()))); } @BuildStep public RequireBodyHandlerBuildItem requestBodyHandler(List<FunctionBuildItem> functions) { if (functions.isEmpty()) { return null; } // Require the body handler if there are functions as they may require the HTTP body return new RequireBodyHandlerBuildItem(); } @BuildStep() @Record(STATIC_INIT) public void staticInit(FunqyHttpBindingRecorder binding, BeanContainerBuildItem beanContainer, // dependency Optional<FunctionInitializedBuildItem> hasFunctions, VertxHttpBuildTimeConfig httpBuildTimeConfig) { if (!hasFunctions.isPresent() || hasFunctions.get() == null) return; // The context path + the resources path String rootPath = httpBuildTimeConfig.rootPath(); binding.init(); } @BuildStep @Record(RUNTIME_INIT) public void boot(ShutdownContextBuildItem shutdown, FunqyHttpBindingRecorder binding, BuildProducer<FeatureBuildItem> feature, BuildProducer<RouteBuildItem> routes, CoreVertxBuildItem vertx, Optional<FunctionInitializedBuildItem> hasFunctions, List<FunctionBuildItem> functions, BeanContainerBuildItem beanContainer, VertxHttpBuildTimeConfig httpConfig, ExecutorBuildItem executorBuildItem) { if (!hasFunctions.isPresent() || hasFunctions.get() == null) return; feature.produce(new FeatureBuildItem(FUNQY_HTTP_FEATURE)); String rootPath = httpConfig.rootPath(); Handler<RoutingContext> handler = binding.start(rootPath, vertx.getVertx(), shutdown, beanContainer.getValue(), executorBuildItem.getExecutorProxy()); for (FunctionBuildItem function : functions) { if (rootPath == null) rootPath = "/"; else if 
(!rootPath.endsWith("/")) rootPath += "/"; String name = function.getFunctionName() == null ? function.getMethodName() : function.getFunctionName(); //String path = rootPath + name; String path = "/" + name; routes.produce(RouteBuildItem.builder().route(path).handler(handler).build()); } } }
FunqyHttpBuildStep
java
spring-projects__spring-security
web/src/main/java/org/springframework/security/web/util/matcher/RequestMatcher.java
{ "start": 1854, "end": 3416 }
class ____ { private final boolean match; private final Map<String, String> variables; MatchResult(boolean match, Map<String, String> variables) { this.match = match; this.variables = variables; } /** * @return true if the comparison against the HttpServletRequest produced a * successful match */ public boolean isMatch() { return this.match; } /** * Returns the extracted variable values where the key is the variable name and * the value is the variable value * @return a map containing key-value pairs representing extracted variable names * and variable values */ public Map<String, String> getVariables() { return this.variables; } /** * Creates an instance of {@link MatchResult} that is a match with no variables * @return {@link MatchResult} that is a match with no variables */ public static MatchResult match() { return new MatchResult(true, Collections.emptyMap()); } /** * Creates an instance of {@link MatchResult} that is a match with the specified * variables * @param variables the specified variables * @return {@link MatchResult} that is a match with the specified variables */ public static MatchResult match(Map<String, String> variables) { return new MatchResult(true, variables); } /** * Creates an instance of {@link MatchResult} that is not a match. * @return {@link MatchResult} that is not a match */ public static MatchResult notMatch() { return new MatchResult(false, Collections.emptyMap()); } } }
MatchResult
java
google__dagger
javatests/dagger/internal/codegen/SubcomponentCreatorValidationTest.java
{ "start": 19926, "end": 20125 }
class ____ {", " abstract String s();", "") .addLinesIf( BUILDER, " @Subcomponent.Builder", "
ChildComponent
java
apache__hadoop
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
{ "start": 899, "end": 1235 }
class ____ { public static void waitForHealthState(ZKFailoverController zkfc, HealthMonitor.State state, MultithreadedTestUtil.TestContext ctx) throws Exception { while (zkfc.getLastHealthState() != state) { if (ctx != null) { ctx.checkException(); } Thread.sleep(50); } } }
ZKFCTestUtil
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
{ "start": 2624, "end": 2806 }
class ____ extends AccessControlException { public MyAuthorizationProviderAccessException() { super(); } }; public static
MyAuthorizationProviderAccessException
java
spring-projects__spring-framework
spring-webflux/src/main/java/org/springframework/web/reactive/function/server/EntityResponse.java
{ "start": 1749, "end": 2529 }
interface ____<T> extends ServerResponse { /** * Return the entity that makes up this response. */ T entity(); /** * Return the {@code BodyInserter} that writes the entity to the output stream. */ BodyInserter<T, ? super ServerHttpResponse> inserter(); // Static builder methods /** * Create a builder with the given object. * @param body the object that represents the body of the response * @param <T> the type of the body * @return the created builder */ static <T> Builder<T> fromObject(T body) { return new DefaultEntityResponseBuilder<>(body, BodyInserters.fromValue(body)); } /** * Create a builder with the given producer. * @param producer the producer that represents the body of the response * @param elementClass the
EntityResponse
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/Conversions.java
{ "start": 6124, "end": 7837 }
class ____ extends Conversion<BigDecimal> { @Override public Class<BigDecimal> getConvertedType() { return BigDecimal.class; } @Override public String getLogicalTypeName() { return "big-decimal"; } @Override public BigDecimal fromBytes(final ByteBuffer value, final Schema schema, final LogicalType type) { BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(value.array(), null); try { BigInteger bg = null; ByteBuffer buffer = decoder.readBytes(null); byte[] array = buffer.array(); if (array.length > 0) { bg = new BigInteger(array); } int scale = decoder.readInt(); return new BigDecimal(bg, scale); } catch (IOException e) { throw new RuntimeException(e); } } @Override public ByteBuffer toBytes(final BigDecimal value, final Schema schema, final LogicalType type) { try { ByteArrayOutputStream out = new ByteArrayOutputStream(); BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null); BigInteger unscaledValue = value.unscaledValue(); if (unscaledValue != null) { encoder.writeBytes(unscaledValue.toByteArray()); } else { encoder.writeBytes(new byte[] {}); } encoder.writeInt(value.scale()); encoder.flush(); return ByteBuffer.wrap(out.toByteArray()); } catch (IOException e) { throw new RuntimeException(e); } } @Override public Schema getRecommendedSchema() { return LogicalTypes.bigDecimal().addToSchema(Schema.create(Schema.Type.BYTES)); } } public static
BigDecimalConversion
java
lettuce-io__lettuce-core
src/main/java/io/lettuce/core/sentinel/StatefulRedisSentinelConnectionImpl.java
{ "start": 4634, "end": 4832 }
class ____ extends ConnectionState { @Override protected void setClientName(String clientName) { super.setClientName(clientName); } } }
SentinelConnectionState
java
apache__logging-log4j2
log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/util/StringParameterParser.java
{ "start": 3918, "end": 9565 }
class ____ implements Callable<Map<String, Value>> { private final String input; private final Map<String, Value> map; private State state; private int i; private String key; private Parser(final String input) { this.input = Objects.requireNonNull(input, "input"); this.map = new LinkedHashMap<>(); this.state = State.READING_KEY; this.i = 0; this.key = null; } @Override public Map<String, Value> call() { while (true) { skipWhitespace(); if (i >= input.length()) { break; } switch (state) { case READING_KEY: readKey(); break; case READING_VALUE: readValue(); break; default: throw new IllegalStateException("unknown state: " + state); } } if (state == State.READING_VALUE) { map.put(key, Values.nullValue()); } return map; } private void readKey() { final int eq = input.indexOf('=', i); final int co = input.indexOf(',', i); final int j; final int nextI; if (eq < 0 && co < 0) { // Neither '=', nor ',' was found. j = nextI = input.length(); } else if (eq < 0) { // Found ','. j = nextI = co; } else if (co < 0) { // Found '='. j = eq; nextI = eq + 1; } else if (eq < co) { // Found '=...,'. j = eq; nextI = eq + 1; } else { // Found ',...='. 
j = co; nextI = co; } key = input.substring(i, j).trim(); if (Strings.isEmpty(key)) { final String message = String.format("failed to locate key at index %d: %s", i, input); throw new IllegalArgumentException(message); } if (map.containsKey(key)) { final String message = String.format("conflicting key at index %d: %s", i, input); throw new IllegalArgumentException(message); } state = State.READING_VALUE; i = nextI; } private void readValue() { final boolean doubleQuoted = input.charAt(i) == '"'; if (doubleQuoted) { readDoubleQuotedStringValue(); } else { readStringValue(); } key = null; state = State.READING_KEY; } private void readDoubleQuotedStringValue() { int j = i + 1; while (j < input.length()) { if (input.charAt(j) == '"' && input.charAt(j - 1) != '\\') { break; } else { j++; } } if (j >= input.length()) { final String message = String.format( "failed to locate the end of double-quoted content starting at index %d: %s", i, input); throw new IllegalArgumentException(message); } final String content = input.substring(i + 1, j).replaceAll("\\\\\"", "\""); final Value value = Values.doubleQuotedStringValue(content); map.put(key, value); i = j + 1; skipWhitespace(); if (i < input.length()) { if (input.charAt(i) != ',') { final String message = String.format("was expecting comma at index %d: %s", i, input); throw new IllegalArgumentException(message); } i++; } } private void skipWhitespace() { while (i < input.length()) { final char c = input.charAt(i); if (!Character.isWhitespace(c)) { break; } else { i++; } } } private void readStringValue() { int j = input.indexOf(',', i /* + 1*/); if (j < 0) { j = input.length(); } final String content = input.substring(i, j); final String trimmedContent = content.trim(); final Value value = trimmedContent.isEmpty() ? 
Values.nullValue() : Values.stringValue(trimmedContent); map.put(key, value); i += content.length() + 1; } } public static Map<String, Value> parse(final String input) { return parse(input, null); } public static Map<String, Value> parse(final String input, final Set<String> allowedKeys) { if (Strings.isBlank(input)) { return Collections.emptyMap(); } final Map<String, Value> map = new Parser(input).call(); final Set<String> actualKeys = map.keySet(); for (final String actualKey : actualKeys) { final boolean allowed = allowedKeys == null || allowedKeys.contains(actualKey); if (!allowed) { final String message = String.format("unknown key \"%s\" is found in input: %s", actualKey, input); throw new IllegalArgumentException(message); } } return map; } }
Parser
java
spring-projects__spring-boot
module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/ProcessInfoContributor.java
{ "start": 1413, "end": 1709 }
class ____ implements InfoContributor { private final ProcessInfo processInfo; public ProcessInfoContributor() { this.processInfo = new ProcessInfo(); } @Override public void contribute(Builder builder) { builder.withDetail("process", this.processInfo); } static
ProcessInfoContributor
java
apache__camel
components/camel-ftp/src/main/java/org/apache/camel/component/file/remote/SftpOperations.java
{ "start": 3549, "end": 4027 }
class ____ { final RemoteFileConfiguration configuration; private Exception exception; public TaskPayload(RemoteFileConfiguration configuration) { this.configuration = configuration; } } public SftpOperations() { } public SftpOperations(Proxy proxy) { this.proxy = proxy; } /** * Extended user info which supports interactive keyboard mode, by entering the password. */ public
TaskPayload
java
spring-projects__spring-security
core/src/test/java/org/springframework/security/authentication/dao/DaoAuthenticationProviderTests.java
{ "start": 27119, "end": 27377 }
class ____ implements UserDetailsService { @Override public UserDetails loadUserByUsername(String username) { throw new DataRetrievalFailureException("This mock simulator is designed to fail"); } } private
MockUserDetailsServiceSimulateBackendError
java
apache__camel
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KubernetesNamespacesComponentBuilderFactory.java
{ "start": 2038, "end": 6170 }
interface ____ extends ComponentBuilder<KubernetesNamespacesComponent> { /** * To use an existing kubernetes client. * * The option is a: * &lt;code&gt;io.fabric8.kubernetes.client.KubernetesClient&lt;/code&gt; type. * * Group: common * * @param kubernetesClient the value to set * @return the dsl builder */ default KubernetesNamespacesComponentBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) { doSetProperty("kubernetesClient", kubernetesClient); return this; } /** * Allows for bridging the consumer to the Camel routing Error Handler, * which mean any exceptions (if possible) occurred while the Camel * consumer is trying to pickup incoming messages, or the likes, will * now be processed as a message and handled by the routing Error * Handler. Important: This is only possible if the 3rd party component * allows Camel to be alerted if an exception was thrown. Some * components handle this internally only, and therefore * bridgeErrorHandler is not possible. In other situations we may * improve the Camel component to hook into the 3rd party component and * make this possible for future releases. By default the consumer will * use the org.apache.camel.spi.ExceptionHandler to deal with * exceptions, that will be logged at WARN or ERROR level and ignored. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: consumer * * @param bridgeErrorHandler the value to set * @return the dsl builder */ default KubernetesNamespacesComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) { doSetProperty("bridgeErrorHandler", bridgeErrorHandler); return this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. 
By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: producer * * @param lazyStartProducer the value to set * @return the dsl builder */ default KubernetesNamespacesComponentBuilder lazyStartProducer(boolean lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * Whether autowiring is enabled. This is used for automatic autowiring * options (the option must be marked as autowired) by looking up in the * registry to find if there is a single instance of matching type, * which then gets configured on the component. This can be used for * automatic configuring JDBC data sources, JMS connection factories, * AWS Clients, etc. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: true * Group: advanced * * @param autowiredEnabled the value to set * @return the dsl builder */ default KubernetesNamespacesComponentBuilder autowiredEnabled(boolean autowiredEnabled) { doSetProperty("autowiredEnabled", autowiredEnabled); return this; } }
KubernetesNamespacesComponentBuilder
java
square__retrofit
retrofit/src/main/java/retrofit2/CallAdapter.java
{ "start": 1046, "end": 2300 }
interface ____<R, T> { /** * Returns the value type that this adapter uses when converting the HTTP response body to a Java * object. For example, the response type for {@code Call<Repo>} is {@code Repo}. This type is * used to prepare the {@code call} passed to {@code #adapt}. * * <p>Note: This is typically not the same type as the {@code returnType} provided to this call * adapter's factory. */ Type responseType(); /** * Returns an instance of {@code T} which delegates to {@code call}. * * <p>For example, given an instance for a hypothetical utility, {@code Async}, this instance * would return a new {@code Async<R>} which invoked {@code call} when run. * * <pre><code> * &#64;Override * public &lt;R&gt; Async&lt;R&gt; adapt(final Call&lt;R&gt; call) { * return Async.create(new Callable&lt;Response&lt;R&gt;&gt;() { * &#64;Override * public Response&lt;R&gt; call() throws Exception { * return call.execute(); * } * }); * } * </code></pre> */ T adapt(Call<R> call); /** * Creates {@link CallAdapter} instances based on the return type of {@linkplain * Retrofit#create(Class) the service interface} methods. */ abstract
CallAdapter
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/instantiation/InstantiationWithGenericsExpressionTest.java
{ "start": 5882, "end": 6172 }
class ____ { private long gen; private String data; public long getGen() { return gen; } public void setGen(final long gen) { this.gen = gen; } public String getData() { return data; } public void setData(String data) { this.data = data; } } }
InjectionDto
java
elastic__elasticsearch
x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java
{ "start": 3178, "end": 20638 }
class ____ implements SchedulerEngine.Listener { private static final Logger logger = LogManager.getLogger(SnapshotLifecycleTask.class); private final ProjectId projectId; private final Client client; private final ClusterService clusterService; private final SnapshotHistoryStore historyStore; public SnapshotLifecycleTask( final ProjectId projectId, final Client client, final ClusterService clusterService, final SnapshotHistoryStore historyStore ) { this.projectId = projectId; this.client = new OriginSettingClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN); this.clusterService = clusterService; this.historyStore = historyStore; } /** * Find {@link RegisteredPolicySnapshots} for the given policy that are no longer running. * @param projectState the current project state * @param policyId the policy id for which to find completed registered snapshots * @return a list of snapshot names */ static List<String> findCompletedRegisteredSnapshotNames(ProjectState projectState, String policyId) { Set<SnapshotId> runningSnapshots = currentlyRunningSnapshots(projectState.cluster()); RegisteredPolicySnapshots registeredSnapshots = projectState.metadata() .custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); return registeredSnapshots.getSnapshots() .stream() // look for snapshots of this SLM policy, leave the rest to the policy that owns it .filter(policySnapshot -> policySnapshot.getPolicy().equals(policyId)) // look for snapshots that are no longer running .filter(policySnapshot -> runningSnapshots.contains(policySnapshot.getSnapshotId()) == false) .map(policySnapshot -> policySnapshot.getSnapshotId().getName()) .toList(); } @Override public void triggered(SchedulerEngine.Event event) { logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.jobName()); ProjectMetadata projectMetadata = clusterService.state().getMetadata().getProject(projectId); final Optional<String> snapshotName = maybeTakeSnapshot(projectMetadata, event.jobName(), 
client, clusterService, historyStore); // Would be cleaner if we could use Optional#ifPresentOrElse snapshotName.ifPresent( name -> logger.info( "snapshot lifecycle policy job [{}] issued new snapshot creation for [{}] successfully", event.jobName(), name ) ); if (snapshotName.isPresent() == false) { logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.jobName()); } } /** * Find {@link RegisteredPolicySnapshots} that are no longer running, and fetch their snapshot info. These snapshots should have been * removed from the registered set by WriteJobStatus when they were completed. However, they were not removed likely due to the master * being shutdown at the same time of a SLM run, causing WriteJobStatus to fail. These registered snapshots will be cleaned up in the * next SLM run and their stats will be retroactively recorded in SLM cluster state based on their status. */ private static void findCompletedRegisteredSnapshotInfo( final ProjectState projectState, final String policyId, final Client client, final ActionListener<List<SnapshotInfo>> listener ) { var snapshotNames = findCompletedRegisteredSnapshotNames(projectState, policyId); if (snapshotNames.isEmpty() == false) { var policyMetadata = getSnapPolicyMetadataById(projectState.metadata(), policyId); if (policyMetadata.isPresent() == false) { listener.onFailure(new IllegalStateException(format("snapshot lifecycle policy [%s] no longer exists", policyId))); return; } SnapshotLifecyclePolicy policy = policyMetadata.get().getPolicy(); GetSnapshotsRequest request = new GetSnapshotsRequest( TimeValue.MAX_VALUE, // do not time out internal request in case of slow master node new String[] { policy.getRepository() }, snapshotNames.toArray(new String[0]) ); request.ignoreUnavailable(true); request.includeIndexNames(false); client.admin() .cluster() .execute( TransportGetSnapshotsAction.TYPE, request, ActionListener.wrap(response -> 
listener.onResponse(response.getSnapshots()), listener::onFailure) ); } else { listener.onResponse(Collections.emptyList()); } } /** * For the given job id (a combination of policy id and version), issue a create snapshot * request. On a successful or failed create snapshot issuing the state is stored in the cluster * state in the policy's metadata * @return An optional snapshot name if the request was issued successfully */ public static Optional<String> maybeTakeSnapshot( final ProjectMetadata projectMetadata, final String jobId, final Client client, final ClusterService clusterService, final SnapshotHistoryStore historyStore ) { ProjectId projectId = projectMetadata.id(); Optional<SnapshotLifecyclePolicyMetadata> maybeMetadata = getSnapPolicyMetadata(projectMetadata, jobId); String snapshotName = maybeMetadata.map(policyMetadata -> { String policyId = policyMetadata.getPolicy().getId(); // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest(TimeValue.MAX_VALUE); final SnapshotId snapshotId = new SnapshotId(request.snapshot(), request.uuid()); final LifecyclePolicySecurityClient clientWithHeaders = new LifecyclePolicySecurityClient( client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, policyMetadata.getHeaders() ); logger.info( "snapshot lifecycle policy [{}] issuing create snapshot [{}]", policyMetadata.getPolicy().getId(), request.snapshot() ); clientWithHeaders.admin().cluster().createSnapshot(request, new ActionListener<>() { @Override public void onResponse(CreateSnapshotResponse createSnapshotResponse) { logger.debug( "snapshot response for [{}]: {}", policyMetadata.getPolicy().getId(), Strings.toString(createSnapshotResponse) ); final SnapshotInfo snapInfo = createSnapshotResponse.getSnapshotInfo(); assert snapInfo != null : "completed snapshot info is null"; // Check that there are no failed shards, since the request may not entirely // fail, 
but may still have failures (such as in the case of an aborted snapshot) if (snapInfo.failedShards() == 0) { long snapshotStartTime = snapInfo.startTime(); final long timestamp = Instant.now().toEpochMilli(); historyStore.putAsync( SnapshotHistoryItem.creationSuccessRecord(timestamp, policyMetadata.getPolicy(), request.snapshot()) ); // retrieve the current project state after snapshot is completed, since snapshotting can take a while ProjectState currentProjectState = clusterService.state().projectState(projectId); findCompletedRegisteredSnapshotInfo(currentProjectState, policyId, client, new ActionListener<>() { @Override public void onResponse(List<SnapshotInfo> snapshotInfo) { submitUnbatchedTask( clusterService, "slm-record-success-" + policyId, WriteJobStatus.success(projectId, policyId, snapshotId, snapshotStartTime, timestamp, snapshotInfo) ); } @Override public void onFailure(Exception e) { logger.warn(() -> format("failed to retrieve stale registered snapshots for job [%s]", jobId), e); // still record the successful snapshot submitUnbatchedTask( clusterService, "slm-record-success-" + policyId, WriteJobStatus.success( projectId, policyId, snapshotId, snapshotStartTime, timestamp, Collections.emptyList() ) ); } }); } else { int failures = snapInfo.failedShards(); int total = snapInfo.totalShards(); final SnapshotException e = new SnapshotException( request.repository(), request.snapshot(), "failed to create snapshot successfully, " + failures + " out of " + total + " total shards failed" ); // Call the failure handler to register this as a failure and persist it onFailure(e); } } @Override public void onFailure(Exception e) { SnapshotHistoryStore.logErrorOrWarning( logger, clusterService.state(), () -> format("failed to create snapshot for snapshot lifecycle policy [%s]", policyMetadata.getPolicy().getId()), e ); final long timestamp = Instant.now().toEpochMilli(); try { final SnapshotHistoryItem failureRecord = SnapshotHistoryItem.creationFailureRecord( 
timestamp, policyMetadata.getPolicy(), request.snapshot(), e ); historyStore.putAsync(failureRecord); } catch (IOException ex) { // This shouldn't happen unless there's an issue with serializing the original exception, which // shouldn't happen logger.error( () -> format( "failed to record snapshot creation failure for snapshot lifecycle policy [%s]", policyMetadata.getPolicy().getId() ), e ); } // retrieve the current project state after snapshot is completed, since snapshotting can take a while ProjectState currentProjectState = clusterService.state().projectState(projectId); findCompletedRegisteredSnapshotInfo(currentProjectState, policyId, client, new ActionListener<>() { @Override public void onResponse(List<SnapshotInfo> snapshotInfo) { submitUnbatchedTask( clusterService, "slm-record-failure-" + policyMetadata.getPolicy().getId(), WriteJobStatus.failure( projectId, policyMetadata.getPolicy().getId(), snapshotId, timestamp, snapshotInfo, e ) ); } @Override public void onFailure(Exception e) { logger.warn(() -> format("failed to retrieve stale registered snapshots for job [%s]", jobId), e); // still record the failed snapshot submitUnbatchedTask( clusterService, "slm-record-failure-" + policyMetadata.getPolicy().getId(), WriteJobStatus.failure( projectId, policyMetadata.getPolicy().getId(), snapshotId, timestamp, Collections.emptyList(), e ) ); } }); } }); return request.snapshot(); }).orElse(null); return Optional.ofNullable(snapshotName); } @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here private static void submitUnbatchedTask( ClusterService clusterService, @SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task ) { clusterService.submitUnbatchedStateUpdateTask(source, task); } /** * For the given job id, return an optional policy metadata object, if one exists */ static Optional<SnapshotLifecyclePolicyMetadata> getSnapPolicyMetadata(final ProjectMetadata projectMetadata, final 
String jobId) { return Optional.ofNullable((SnapshotLifecycleMetadata) projectMetadata.custom(SnapshotLifecycleMetadata.TYPE)) .map(SnapshotLifecycleMetadata::getSnapshotConfigurations) .flatMap(configMap -> configMap.values().stream().filter(policyMeta -> jobId.equals(getJobId(policyMeta))).findFirst()); } /** * For the given policy id, return an optional policy metadata object, if one exists */ static Optional<SnapshotLifecyclePolicyMetadata> getSnapPolicyMetadataById( final ProjectMetadata projectMetadata, final String policyId ) { return Optional.ofNullable((SnapshotLifecycleMetadata) projectMetadata.custom(SnapshotLifecycleMetadata.TYPE)) .map(metadata -> metadata.getSnapshotConfigurations().get(policyId)); } public static String exceptionToString(Exception ex) { return Strings.toString((builder, params) -> { ElasticsearchException.generateThrowableXContent(builder, params, ex); return builder; }, ToXContent.EMPTY_PARAMS); } static Set<SnapshotId> currentlyRunningSnapshots(ClusterState clusterState) { final SnapshotsInProgress snapshots = clusterState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final Set<SnapshotId> currentlyRunning = new HashSet<>(); @FixForMultiProject(description = "replace with snapshots.entriesByRepo(ProjectId) when SLM is project aware") final Iterable<List<SnapshotsInProgress.Entry>> entriesByRepo = snapshots.entriesByRepo(); for (final List<SnapshotsInProgress.Entry> entriesForRepo : entriesByRepo) { for (SnapshotsInProgress.Entry entry : entriesForRepo) { currentlyRunning.add(entry.snapshot().getSnapshotId()); } } return currentlyRunning; } static SnapshotInvocationRecord buildFailedSnapshotRecord(SnapshotId snapshot) { return new SnapshotInvocationRecord( snapshot.getName(), null, Instant.now().toEpochMilli(), String.format(Locale.ROOT, "found registered snapshot [%s] which is no longer running, assuming failed.", snapshot.getName()) ); } static SnapshotInvocationRecord buildSnapshotRecord(SnapshotInfo snapshotInfo, 
@Nullable String details) { return new SnapshotInvocationRecord(snapshotInfo.snapshotId().getName(), snapshotInfo.startTime(), snapshotInfo.endTime(), details); } static boolean isSnapshotSuccessful(SnapshotInfo snapshotInfo) { return snapshotInfo.state() != null && snapshotInfo.state().completed() && snapshotInfo.failedShards() == 0; } /** * A cluster state update task to write the result of a snapshot job to the cluster metadata for the associated policy. */ static
SnapshotLifecycleTask
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java
{ "start": 9781, "end": 16760 }
class ____ extends InternalOrder { private final byte id; private final String key; private final SortOrder order; private final Comparator<Bucket> comparator; private final Comparator<DelayedBucket<? extends Bucket>> delayedBucketCompator; SimpleOrder( byte id, String key, SortOrder order, Comparator<Bucket> comparator, Comparator<DelayedBucket<? extends Bucket>> delayedBucketCompator ) { this.id = id; this.key = key; this.order = order; this.comparator = comparator; this.delayedBucketCompator = delayedBucketCompator; } @Override public Comparator<Bucket> comparator() { return comparator; } @SuppressWarnings({ "rawtypes", "unchecked" }) @Override <B extends InternalMultiBucketAggregation.InternalBucket> Comparator<DelayedBucket<B>> delayedBucketComparator( BiFunction<List<B>, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext ) { return (Comparator) delayedBucketCompator; } @Override byte id() { return id; } @Override public <T extends Bucket> Comparator<BucketAndOrd<T>> partiallyBuiltBucketComparator(Aggregator aggregator) { Comparator<Bucket> comparator = comparator(); return (lhs, rhs) -> comparator.compare(lhs.bucket, rhs.bucket); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject().field(key, order.toString()).endObject(); } @Override public int hashCode() { return Objects.hash(id, key, order); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } SimpleOrder other = (SimpleOrder) obj; return Objects.equals(id, other.id) && Objects.equals(key, other.key) && Objects.equals(order, other.order); } } private static final byte COUNT_DESC_ID = 1; private static final byte COUNT_ASC_ID = 2; private static final byte KEY_DESC_ID = 3; private static final byte KEY_ASC_ID = 4; /** * Order by the (higher) count of each bucket. 
*/ static final InternalOrder COUNT_DESC = new SimpleOrder( COUNT_DESC_ID, "_count", SortOrder.DESC, comparingCounts().reversed(), comparingDelayedCounts().reversed() ); /** * Order by the (lower) count of each bucket. */ static final InternalOrder COUNT_ASC = new SimpleOrder( COUNT_ASC_ID, "_count", SortOrder.ASC, comparingCounts(), comparingDelayedCounts() ); /** * Order by the key of each bucket descending. */ static final InternalOrder KEY_DESC = new SimpleOrder( KEY_DESC_ID, "_key", SortOrder.DESC, comparingKeys().reversed(), comparingDelayedKeys().reversed() ); /** * Order by the key of each bucket ascending. */ static final InternalOrder KEY_ASC = new SimpleOrder(KEY_ASC_ID, "_key", SortOrder.ASC, comparingKeys(), comparingDelayedKeys()); /** * @return compare by {@link Bucket#getDocCount()}. */ private static Comparator<Bucket> comparingCounts() { return Comparator.comparingLong(Bucket::getDocCount); } /** * @return compare by {@link Bucket#getDocCount()} that will be in the bucket once it is reduced */ private static Comparator<DelayedBucket<? extends Bucket>> comparingDelayedCounts() { return Comparator.comparingLong(DelayedBucket::getDocCount); } /** * @return compare by {@link Bucket#getKey()} from the appropriate implementation. */ @SuppressWarnings("unchecked") private static Comparator<Bucket> comparingKeys() { return (b1, b2) -> { if (b1 instanceof KeyComparable) { return ((KeyComparable) b1).compareKey(b2); } throw new IllegalStateException("Unexpected order bucket class [" + b1.getClass() + "]"); }; } /** * @return compare by {@link Bucket#getKey()} that will be in the bucket once it is reduced */ private static Comparator<DelayedBucket<? extends Bucket>> comparingDelayedKeys() { return DelayedBucket::compareKey; } /** * Determine if the ordering strategy is sorting on bucket count descending. * * @param order bucket ordering strategy to check. 
* @return {@code true} if the ordering strategy is sorting on bucket count descending, {@code false} otherwise. */ public static boolean isCountDesc(BucketOrder order) { return isOrder(order, COUNT_DESC); } /** * Determine if the ordering strategy is sorting on bucket key (ascending or descending). * * @param order bucket ordering strategy to check. * @return {@code true} if the ordering strategy is sorting on bucket key, {@code false} otherwise. */ public static boolean isKeyOrder(BucketOrder order) { return isOrder(order, KEY_ASC) || isOrder(order, KEY_DESC); } /** * Determine if the ordering strategy is sorting on bucket key ascending. * * @param order bucket ordering strategy to check. * @return {@code true} if the ordering strategy is sorting on bucket key ascending, {@code false} otherwise. */ public static boolean isKeyAsc(BucketOrder order) { return isOrder(order, KEY_ASC); } /** * Determine if the ordering strategy is sorting on bucket key descending. * * @param order bucket ordering strategy to check. * @return {@code true} if the ordering strategy is sorting on bucket key descending, {@code false} otherwise. */ public static boolean isKeyDesc(BucketOrder order) { return isOrder(order, KEY_DESC); } /** * Determine if the ordering strategy matches the expected one. * * @param order bucket ordering strategy to check. If this is a {@link CompoundOrder} the first element will be * check instead. * @param expected expected bucket ordering strategy. * @return {@code true} if the order matches, {@code false} otherwise. */ private static boolean isOrder(BucketOrder order, BucketOrder expected) { return order == expected || (order instanceof CompoundOrder compoundOrder && compoundOrder.orderElements.getFirst() == expected); } /** * Contains logic for reading/writing {@link BucketOrder} from/to streams. */ public static
SimpleOrder
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MiloBrowseEndpointBuilderFactory.java
{ "start": 1609, "end": 9622 }
interface ____ extends EndpointProducerBuilder { default AdvancedMiloBrowseEndpointBuilder advanced() { return (AdvancedMiloBrowseEndpointBuilder) this; } /** * A virtual client id to force the creation of a new connection * instance. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param clientId the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder clientId(String clientId) { doSetProperty("clientId", clientId); return this; } /** * When browsing recursively into sub-types, what's the maximum search * depth for diving into the tree. Default value notice: Maximum depth * for browsing recursively (only if recursive = true). * * The option is a: <code>int</code> type. * * Default: 3 * Group: producer * * @param depth the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder depth(int depth) { doSetProperty("depth", depth); return this; } /** * When browsing recursively into sub-types, what's the maximum search * depth for diving into the tree. Default value notice: Maximum depth * for browsing recursively (only if recursive = true). * * The option will be converted to a <code>int</code> type. * * Default: 3 * Group: producer * * @param depth the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder depth(String depth) { doSetProperty("depth", depth); return this; } /** * The direction to browse (forward, inverse, ...). Default value * notice: The direction to browse; see * org.eclipse.milo.opcua.stack.core.types.enumerated.BrowseDirection. * * The option is a: * <code>org.eclipse.milo.opcua.stack.core.types.enumerated.BrowseDirection</code> type. 
* * Default: Forward * Group: producer * * @param direction the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder direction(org.eclipse.milo.opcua.stack.core.types.enumerated.BrowseDirection direction) { doSetProperty("direction", direction); return this; } /** * The direction to browse (forward, inverse, ...). Default value * notice: The direction to browse; see * org.eclipse.milo.opcua.stack.core.types.enumerated.BrowseDirection. * * The option will be converted to a * <code>org.eclipse.milo.opcua.stack.core.types.enumerated.BrowseDirection</code> type. * * Default: Forward * Group: producer * * @param direction the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder direction(String direction) { doSetProperty("direction", direction); return this; } /** * A suffix for endpoint URI when discovering. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param discoveryEndpointSuffix the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder discoveryEndpointSuffix(String discoveryEndpointSuffix) { doSetProperty("discoveryEndpointSuffix", discoveryEndpointSuffix); return this; } /** * An alternative discovery URI. * * The option is a: <code>java.lang.String</code> type. * * Group: producer * * @param discoveryEndpointUri the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder discoveryEndpointUri(String discoveryEndpointUri) { doSetProperty("discoveryEndpointUri", discoveryEndpointUri); return this; } /** * Filter out node ids to limit browsing. Default value notice: Regular * filter expression matching node ids. * * The option is a: <code>java.lang.String</code> type. 
* * Default: None * Group: producer * * @param filter the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder filter(String filter) { doSetProperty("filter", filter); return this; } /** * Whether to include sub-types for browsing; only applicable for * non-recursive browsing. * * The option is a: <code>boolean</code> type. * * Default: true * Group: producer * * @param includeSubTypes the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder includeSubTypes(boolean includeSubTypes) { doSetProperty("includeSubTypes", includeSubTypes); return this; } /** * Whether to include sub-types for browsing; only applicable for * non-recursive browsing. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: producer * * @param includeSubTypes the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder includeSubTypes(String includeSubTypes) { doSetProperty("includeSubTypes", includeSubTypes); return this; } /** * The maximum number node ids requested per server call. Default value * notice: Maximum number of node ids requested per browse call (applies * to browsing sub-types only; only if recursive = true). * * The option is a: <code>int</code> type. * * Default: 10 * Group: producer * * @param maxNodeIdsPerRequest the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder maxNodeIdsPerRequest(int maxNodeIdsPerRequest) { doSetProperty("maxNodeIdsPerRequest", maxNodeIdsPerRequest); return this; } /** * The maximum number node ids requested per server call. Default value * notice: Maximum number of node ids requested per browse call (applies * to browsing sub-types only; only if recursive = true). * * The option will be converted to a <code>int</code> type. 
* * Default: 10 * Group: producer * * @param maxNodeIdsPerRequest the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder maxNodeIdsPerRequest(String maxNodeIdsPerRequest) { doSetProperty("maxNodeIdsPerRequest", maxNodeIdsPerRequest); return this; } /** * The node definition (see Node ID). Default value notice: Root folder * as per OPC-UA spec. * * The option is a: <code>java.lang.String</code> type. * * Default: ns=0;id=84 * Group: producer * * @param node the value to set * @return the dsl builder */ default MiloBrowseEndpointBuilder node(String node) { doSetProperty("node", node); return this; } /** * The mask indicating the node classes of interest in browsing. Default * value notice: Comma-separated node
MiloBrowseEndpointBuilder
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java
{ "start": 23190, "end": 23500 }
class ____ implements CheckpointStorageFactory<MockStorage> { @Override public MockStorage createFromConfig(ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException { return new MockStorage(); } } static final
WorkingFactory
java
spring-projects__spring-data-jpa
spring-data-jpa/src/test/java/org/springframework/data/jpa/convert/QueryByExamplePredicateBuilderUnitTests.java
{ "start": 2694, "end": 11109 }
class ____ { @Mock CriteriaBuilder cb; @Mock Root root; @Mock EntityType<Person> personEntityType; @Mock EntityType<Skill> skillEntityType; @Mock Expression expressionMock; @Mock Predicate truePredicate, dummyPredicate, andPredicate, orPredicate; @Mock Path dummyPath; @Mock Join from; private Set<SingularAttribute<? super Person, ?>> personEntityAttribtues; private Set<SingularAttribute<? super Skill, ?>> skillEntityAttribtues; private SingularAttribute<? super Person, Long> personIdAttribute; private SingularAttribute<? super Person, String> personFirstnameAttribute; private SingularAttribute<? super Person, Long> personAgeAttribute; private SingularAttribute<? super Person, Person> personFatherAttribute; private SingularAttribute<? super Person, Skill> personSkillAttribute; private SingularAttribute<? super Person, Address> personAddressAttribute; private SingularAttribute<? super Skill, String> skillNameAttribute; private SingularAttribute<? super Skill, Skill> skillNestedAttribute; @BeforeEach void setUp() { personIdAttribute = new SingularAttributeStub<>("id", PersistentAttributeType.BASIC, Long.class); personFirstnameAttribute = new SingularAttributeStub<>("firstname", PersistentAttributeType.BASIC, String.class); personAgeAttribute = new SingularAttributeStub<>("age", PersistentAttributeType.BASIC, Long.class); personFatherAttribute = new SingularAttributeStub<>("father", PersistentAttributeType.MANY_TO_ONE, Person.class, personEntityType); personSkillAttribute = new SingularAttributeStub<>("skill", PersistentAttributeType.EMBEDDED, Skill.class, skillEntityType); personAddressAttribute = new SingularAttributeStub<>("address", PersistentAttributeType.EMBEDDED, Address.class); skillNameAttribute = new SingularAttributeStub<>("name", PersistentAttributeType.BASIC, String.class); skillNestedAttribute = new SingularAttributeStub<>("nested", PersistentAttributeType.MANY_TO_ONE, Skill.class, skillEntityType); personEntityAttribtues = new LinkedHashSet<>(); 
personEntityAttribtues.add(personIdAttribute); personEntityAttribtues.add(personFirstnameAttribute); personEntityAttribtues.add(personAgeAttribute); personEntityAttribtues.add(personFatherAttribute); personEntityAttribtues.add(personAddressAttribute); personEntityAttribtues.add(personSkillAttribute); skillEntityAttribtues = new LinkedHashSet<>(); skillEntityAttribtues.add(skillNameAttribute); skillEntityAttribtues.add(skillNestedAttribute); doReturn(dummyPath).when(root).get(any(SingularAttribute.class)); doReturn(dummyPath).when(root).get(anyString()); doReturn(personEntityType).when(root).getModel(); doReturn(personEntityAttribtues).when(personEntityType).getSingularAttributes(); doReturn(skillEntityAttribtues).when(skillEntityType).getSingularAttributes(); doReturn(dummyPredicate).when(cb).equal(any(Expression.class), any(String.class)); doReturn(dummyPredicate).when(cb).equal(any(Expression.class), any(Long.class)); doReturn(dummyPredicate).when(cb).like(any(Expression.class), any(String.class), anyChar()); doReturn(expressionMock).when(cb).literal(any(Boolean.class)); doReturn(truePredicate).when(cb).isTrue(eq(expressionMock)); doReturn(andPredicate).when(cb).and(ArgumentMatchers.<Predicate[]> any()); doReturn(orPredicate).when(cb).or(ArgumentMatchers.<Predicate[]> any()); } @Test // DATAJPA-218 void getPredicateShouldThrowExceptionOnNullRoot() { assertThatIllegalArgumentException().isThrownBy( () -> QueryByExamplePredicateBuilder.getPredicate(null, cb, of(new Person()), EscapeCharacter.DEFAULT)); } @Test // DATAJPA-218 void getPredicateShouldThrowExceptionOnNullCriteriaBuilder() { assertThatIllegalArgumentException().isThrownBy( () -> QueryByExamplePredicateBuilder.getPredicate(root, null, of(new Person()), EscapeCharacter.DEFAULT)); } @Test // DATAJPA-218 void getPredicateShouldThrowExceptionOnNullExample() { assertThatIllegalArgumentException() .isThrownBy(() -> QueryByExamplePredicateBuilder.getPredicate(root, null, null, EscapeCharacter.DEFAULT)); } @Test 
// DATAJPA-218 void emptyCriteriaListShouldResultInNullPredicate() { assertThat(QueryByExamplePredicateBuilder.getPredicate(root, cb, of(new Person()), EscapeCharacter.DEFAULT)) .isNull(); } @Test // DATAJPA-218 void singleElementCriteriaShouldJustReturnIt() { Person p = new Person(); p.firstname = "foo"; assertThat(QueryByExamplePredicateBuilder.getPredicate(root, cb, of(p), EscapeCharacter.DEFAULT)) .isEqualTo(dummyPredicate); verify(cb, times(1)).equal(any(Expression.class), eq("foo")); } @Test // DATAJPA-218 void multiPredicateCriteriaShouldReturnCombinedOnes() { Person p = new Person(); p.firstname = "foo"; p.age = 2L; when(cb.and(any(Predicate[].class))).thenReturn(andPredicate); assertThat(QueryByExamplePredicateBuilder.getPredicate(root, cb, of(p), EscapeCharacter.DEFAULT)) .isEqualTo(andPredicate); verify(cb, times(1)).equal(any(Expression.class), eq("foo")); verify(cb, times(1)).equal(any(Expression.class), eq(2L)); } @Test // DATAJPA-879 void orConcatenatesPredicatesIfMatcherSpecifies() { Person person = new Person(); person.firstname = "foo"; person.age = 2L; Example<Person> example = of(person, ExampleMatcher.matchingAny()); when(cb.or(any(Predicate[].class))).thenReturn(orPredicate); assertThat(QueryByExamplePredicateBuilder.getPredicate(root, cb, example, EscapeCharacter.DEFAULT)) .isEqualTo(orPredicate); verify(cb).or(ArgumentMatchers.any(Predicate[].class)); } @Test // DATAJPA-1372 void considersSingularJoinedAttributes() { doReturn(from).when(root).join(anyString()); doReturn(dummyPath).when(dummyPath).get(any(SingularAttribute.class)); doReturn(dummyPath).when(dummyPath).get(anyString()); Person person = new Person(); person.skill = new Skill(); person.skill.nested = new Skill(); person.skill.nested.name = "foo"; Example<Person> example = of(person, ExampleMatcher.matching().withMatcher("skill.nested.name", GenericPropertyMatcher::contains)); assertThat(QueryByExamplePredicateBuilder.getPredicate(root, cb, example)).isEqualTo(dummyPredicate); 
verify(cb).like(dummyPath, "%foo%", '\\'); } @Test // DATAJPA-1534 void likePatternsGetEscapedContaining() { Person person = new Person(); person.firstname = "f\\o_o"; Example<Person> example = of( // person, // ExampleMatcher // .matchingAny() // .withStringMatcher(ExampleMatcher.StringMatcher.CONTAINING) // ); QueryByExamplePredicateBuilder.getPredicate(root, cb, example, EscapeCharacter.DEFAULT); verify(cb, times(1)).like(any(Expression.class), eq("%f\\\\o\\_o%"), eq('\\')); } @Test // DATAJPA-1534 void likePatternsGetEscapedStarting() { Person person = new Person(); person.firstname = "f\\o_o"; Example<Person> example = of( // person, // ExampleMatcher // .matchingAny() // .withStringMatcher(ExampleMatcher.StringMatcher.STARTING) // ); QueryByExamplePredicateBuilder.getPredicate(root, cb, example, EscapeCharacter.DEFAULT); verify(cb, times(1)).like(any(Expression.class), eq("f\\\\o\\_o%"), eq('\\')); } @Test // DATAJPA-1534 void likePatternsGetEscapedEnding() { Person person = new Person(); person.firstname = "f\\o_o"; Example<Person> example = of( // person, // ExampleMatcher // .matchingAny() // .withStringMatcher(ExampleMatcher.StringMatcher.ENDING) // ); QueryByExamplePredicateBuilder.getPredicate(root, cb, example, EscapeCharacter.DEFAULT); verify(cb, times(1)).like(any(Expression.class), eq("%f\\\\o\\_o"), eq('\\')); } @ParameterizedTest(name = "Matching {0} on association should join using JoinType.{1} ") // GH-3763 @CsvSource({ "ALL, INNER", "ANY, LEFT" }) void matchingAssociationShouldUseTheCorrectJoinType(MatchMode matchMode, JoinType expectedJoinType) { Person person = new Person(); person.father = new Person(); ExampleMatcher matcher = matchMode == MatchMode.ALL ? ExampleMatcher.matchingAll() : ExampleMatcher.matchingAny(); Example<Person> example = of(person, matcher); QueryByExamplePredicateBuilder.getPredicate(root, cb, example, EscapeCharacter.DEFAULT); verify(root, times(1)).join("father", expectedJoinType); } @SuppressWarnings("unused") static
QueryByExamplePredicateBuilderUnitTests
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/RMQueueAclInfo.java
{ "start": 1084, "end": 2216 }
class ____ { private Boolean allowed; private String user; private String diagnostics; private String subClusterId; public RMQueueAclInfo() { } public RMQueueAclInfo(boolean allowed, String user, String diagnostics) { this.allowed = allowed; this.user = user; this.diagnostics = diagnostics; } public RMQueueAclInfo(boolean allowed, String user, String diagnostics, String subClusterId) { this.allowed = allowed; this.user = user; this.diagnostics = diagnostics; this.subClusterId = subClusterId; } public boolean isAllowed() { return allowed; } public void setAllowed(boolean allowed) { this.allowed = allowed; } public String getUser() { return user; } public void setUser(String user) { this.user = user; } public String getDiagnostics() { return diagnostics; } public void setDiagnostics(String diagnostics) { this.diagnostics = diagnostics; } public String getSubClusterId() { return subClusterId; } public void setSubClusterId(String subClusterId) { this.subClusterId = subClusterId; } }
RMQueueAclInfo
java
google__auto
value/src/main/java/com/google/auto/value/processor/AnnotationOutput.java
{ "start": 4413, "end": 5303 }
class ____ extends SourceFormVisitor { private final ProcessingEnvironment processingEnv; private final String memberName; private final Element errorContext; InitializerSourceFormVisitor( ProcessingEnvironment processingEnv, String memberName, Element errorContext) { this.processingEnv = processingEnv; this.memberName = memberName; this.errorContext = errorContext; } @Override public Void visitAnnotation(AnnotationMirror a, StringBuilder sb) { processingEnv .getMessager() .printMessage( Diagnostic.Kind.ERROR, "@AutoAnnotation cannot yet supply a default value for annotation-valued member '" + memberName + "'", errorContext); sb.append("null"); return null; } } private static
InitializerSourceFormVisitor
java
elastic__elasticsearch
modules/lang-painless/src/test/java/org/elasticsearch/painless/CidrTests.java
{ "start": 513, "end": 2510 }
class ____ extends ScriptTestCase { public void testContains() { Object bool = exec("CIDR c = new CIDR('10.1.1.0/23'); c.contains('10.1.1.128') && c.contains('10.1.0.255')"); assertEquals(Boolean.TRUE, bool); bool = exec("CIDR c = new CIDR('10.1.1.0/25'); c.contains('10.1.1.127')"); assertEquals(Boolean.TRUE, bool); bool = exec("CIDR c = new CIDR('10.1.1.0/25'); c.contains('10.1.1.129')"); assertEquals(Boolean.FALSE, bool); bool = exec("new CIDR('192.168.3.5').contains('192.168.3.5')"); assertEquals(Boolean.TRUE, bool); bool = exec("new CIDR('192.168.3.5').contains('')"); assertEquals(Boolean.FALSE, bool); bool = exec("new CIDR('2001:0db8:85a3::/64').contains('2001:0db8:85a3:0000:0000:8a2e:0370:7334')"); assertEquals(Boolean.TRUE, bool); bool = exec("new CIDR('2001:0db8:85a3::/64').contains('2001:0db8:85a3:0001:0000:8a2e:0370:7334')"); assertEquals(Boolean.FALSE, bool); } public void testInvalidIPs() { IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> exec("new CIDR('abc')")); assertEquals("'abc' is not an IP string literal.", e.getMessage()); e = expectScriptThrows(IllegalArgumentException.class, () -> exec("new CIDR('10.257.3.5')")); assertEquals("'10.257.3.5' is not an IP string literal.", e.getMessage()); e = expectScriptThrows(IllegalArgumentException.class, () -> exec("new CIDR('2001:0db8:85a3:0000:0000:8a2e:0370:733g')")); assertEquals("'2001:0db8:85a3:0000:0000:8a2e:0370:733g' is not an IP string literal.", e.getMessage()); e = expectScriptThrows( IllegalArgumentException.class, () -> exec("new CIDR('2001:0db8:85a3::/64').contains('2001:0db8:85a3:0000:0000:8a2g:0370:7334')") ); assertEquals("'2001:0db8:85a3:0000:0000:8a2g:0370:7334' is not an IP string literal.", e.getMessage()); } }
CidrTests
java
elastic__elasticsearch
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
{ "start": 3916, "end": 4162 }
class ____ { private final String path; public JarScanner(String path) { this.path = path; } private String getPath() { return path; } /** * Get a list of
JarScanner
java
apache__camel
components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/json/MicrometerModule.java
{ "start": 7885, "end": 8503 }
class ____ extends MeterSerializer<LongTaskTimer> { private final TimeUnit timeUnit; private LongTaskTimerSerializer(TimeUnit timeUnit) { super(LongTaskTimer.class); this.timeUnit = timeUnit; } @Override protected void serializeStatistics(LongTaskTimer timer, JsonGenerator json, SerializerProvider provider) throws IOException { json.writeNumberField("activeTasks", timer.activeTasks()); json.writeNumberField("duration", timer.duration(timeUnit)); } } private static final
LongTaskTimerSerializer
java
spring-projects__spring-boot
module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/autoconfigure/ServerProperties.java
{ "start": 7612, "end": 7757 }
class ____ { private final Session session = new Session(); public Session getSession() { return this.session; } public static
Reactive