language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/basic/InheritedTest.java
{ "start": 3808, "end": 4052 }
class ____ extends Person { String title; Employee() { } Employee(String name, String title) { super( name ); this.title = title; } void setTitle(String title) { this.title = title; } } @Entity private static
Employee
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/cdi/type/CdiSmokeTests.java
{ "start": 673, "end": 1822 }
class ____ { @Test void testCdiOperations() { final SeContainerInitializer cdiInitializer = SeContainerInitializer.newInstance() .disableDiscovery() .addBeanClasses( UrlType.class, OtherBean.class ); try ( final SeContainer cdiContainer = cdiInitializer.initialize() ) { final BeanManager beanManager = cdiContainer.getBeanManager(); final AnnotatedType<UrlType> annotatedType; try { annotatedType = beanManager.createAnnotatedType( UrlType.class ); } catch (Exception e) { throw new IllegalStateException( new NotYetReadyException( e ) ); } final InjectionTarget<UrlType> injectionTarget = beanManager .getInjectionTargetFactory( annotatedType ) .createInjectionTarget( null ); final CreationalContext<UrlType> creationalContext = beanManager.createCreationalContext( null ); final UrlType beanInstance = injectionTarget.produce( creationalContext ); injectionTarget.inject( beanInstance, creationalContext ); injectionTarget.postConstruct( beanInstance ); assertThat( beanInstance ).isNotNull(); // assertThat( beanInstance.getOtherBean() ).isNotNull(); } } }
CdiSmokeTests
java
processing__processing4
app/src/processing/app/Sketch.java
{ "start": 29193, "end": 49041 }
class ____ .jar files, * because they can cause trouble. */ public boolean saveAs() throws IOException { String newParentDir = null; String newSketchName = null; final String PROMPT = Language.text("save"); // https://github.com/processing/processing4/issues/77 boolean useNative = Preferences.getBoolean("chooser.files.native"); if (useNative) { // get new name for folder FileDialog fd = new FileDialog(editor, PROMPT, FileDialog.SAVE); if (isReadOnly() || isUntitled()) { // default to the sketchbook folder fd.setDirectory(Preferences.getSketchbookPath()); } else { // default to the parent folder of where this was fd.setDirectory(folder.getParent()); } String oldFolderName = folder.getName(); fd.setFile(oldFolderName); fd.setVisible(true); newParentDir = fd.getDirectory(); newSketchName = fd.getFile(); } else { JFileChooser fc = new JFileChooser(); fc.setDialogTitle(PROMPT); if (isReadOnly() || isUntitled()) { // default to the sketchbook folder fc.setCurrentDirectory(new File(Preferences.getSketchbookPath())); } else { // default to the parent folder of where this was fc.setCurrentDirectory(folder.getParentFile()); } // can't do this, will try to save into itself by default //fc.setSelectedFile(folder); int result = fc.showSaveDialog(editor); if (result == JFileChooser.APPROVE_OPTION) { File selection = fc.getSelectedFile(); newParentDir = selection.getParent(); newSketchName = selection.getName(); } } // user canceled selection if (newSketchName == null) return false; boolean sync = Preferences.getBoolean("editor.sync_folder_and_filename"); String newMainFileName = null; // only set with !sync File newFolder; if (sync) { // before 4.0 beta 6 //String sanitaryName = Sketch.checkName(newSketchName); String newMainName = sanitizeName(newSketchName); newFolder = new File(newParentDir, newMainName); if (!newMainName.equals(newSketchName) && newFolder.exists()) { Messages.showMessage(Language.text("save_file.messages.sketch_exists"), 
Language.interpolate("save_file.messages.sketch_exists.description", newMainName)); return false; } newSketchName = newMainName; newMainFileName = newMainName + "." + mode.getDefaultExtension(); } else { newFolder = new File(newParentDir, newSketchName); // sketch folder name can be different } // make sure there doesn't exist a tab with that name already // but ignore this situation for the first tab, since it's probably being // re-saved (with the same name) to another location/folder. for (int i = 1; i < codeCount; i++) { if (newSketchName.equalsIgnoreCase(code[i].getPrettyName())) { Messages.showMessage(Language.text("save_file.messages.tab_exists"), Language.interpolate("save_file.messages.tab_exists.description", newSketchName)); return false; } } // check if the paths are identical if (newFolder.equals(folder)) { // just use "save" here instead, because the user will have received a // message (from the operating system) about "do you want to replace?" return save(); } // check to see if the user is trying to save this sketch inside itself try { // Includes the separator so that a/b/c is different from a/b/c2. // (a/b/c matches a/b/c2, but a/b/c/ does not match a/b/c2/) String newPath = newFolder.getCanonicalPath() + File.separator; String oldPath = folder.getCanonicalPath() + File.separator; if (newPath.indexOf(oldPath) == 0) { Messages.showWarning(Language.text("save_file.messages.recursive_save"), Language.text("save_file.messages.recursive_save.description")); return false; } } catch (IOException ignored) { } // if the new folder already exists, then first remove its contents before // copying everything over (user will have already been warned). if (newFolder.exists()) { //Util.removeDir(newFolder); try { Platform.deleteFile(newFolder); } catch (IOException e) { e.printStackTrace(); } } // in fact, you can't do this on Windows because the file dialog // will instead put you inside the folder, but it happens on OS X a lot. 
// now make a fresh copy of the folder if (!newFolder.mkdirs()) { // mkdirs() returns true when the folders are created, which should // be the case here because we removed any existing 'newFolder' above. // If this fails, then it probably means the removeDir() failed, // or at least left things behind, which could mean badness later. System.err.println("Error creating path " + newFolder); } // grab the contents of the current tab before saving // first get the contents of the editor text area updateSketchCodes(); File[] copyItems = folder.listFiles(file -> { String name = file.getName(); // just in case the OS likes to return these as if they're legit if (name.equals(".") || name.equals("..")) { return false; } // list of files/folders to be ignored during "save as" String[] ignorable = mode.getIgnorable(); if (ignorable != null) { for (String ignore : ignorable) { if (name.equals(ignore)) { return false; } } } // ignore the extensions for code, since that'll be copied below for (String ext : mode.getExtensions()) { if (name.endsWith(ext)) { return false; } } // don't do screen captures, since there might be thousands. kind of // a hack, but seems harmless. hm, where have i heard that before... //noinspection RedundantIfStatement if (name.startsWith("screen-")) { return false; } return true; }); startSaveAsThread(newFolder, copyItems); // Save each tab to its new location for (int i = 0; i < codeCount; i++) { File newFile = new File(newFolder, code[i].getFileName()); if (i == 0 && sync) { newFile = new File(newFolder, newMainFileName); } code[i].saveAs(newFile); } // We were removing the old folder from the Recent menu, but folks // did not like that behavior because they expected to have older // versions readily available, so we shut it off in 3.5.4 and 4.x. // https://github.com/processing/processing/issues/5902 // if (sync) { // // save the main tab with its new name // File newFile = new File(newFolder, newMainName + "." 
+ mode.getDefaultExtension()); // code[0].saveAs(newFile); // } updateInternal(newFolder); // Make sure that it's not an untitled sketch setUntitled(false); // Add this sketch back using the new name Recent.append(editor); // let Editor know that the save was successful return true; } AtomicBoolean saving = new AtomicBoolean(); public boolean isSaving() { return saving.get(); } /** * Kick off a background thread to copy everything *but* the .pde files. * Due to the poor way (dating back to the late 90s with DBN) that our * save() and saveAs() methods have been implemented to return booleans, * there isn't a good way to return a value to the calling thread without * a good bit of refactoring (that should be done at some point). * As a result, this method will return 'true' before the full "Save As" * has completed, which will cause problems in weird cases. * <p/> * For instance, the threading will cause problems while saving an untitled * sketch that has an enormous data folder while quitting. The save thread to * move those data folder files won't have finished before this returns true, * and the PDE may quit before the SwingWorker completes its job. 
* <p/> * <a href="https://github.com/processing/processing/issues/3843">3843</a> */ void startSaveAsThread(final File newFolder, final File[] copyItems) { saving.set(true); EventQueue.invokeLater(() -> { final JFrame frame = new JFrame("Saving “" + newFolder.getName() + "“…"); frame.setDefaultCloseOperation(WindowConstants.HIDE_ON_CLOSE); Box box = Box.createVerticalBox(); box.setBorder(new EmptyBorder(16, 16, 16, 16)); if (Platform.isMacOS()) { frame.setBackground(Color.WHITE); } JLabel label = new JLabel("Saving additional files from the sketch folder..."); box.add(label); box.add(Box.createVerticalStrut(8)); final JProgressBar progressBar = new JProgressBar(0, 100); // no luck, stuck with ugly on OS X //progressBar.putClientProperty("JComponent.sizeVariant", "regular"); progressBar.setValue(0); progressBar.setStringPainted(true); box.add(progressBar); frame.getContentPane().add(box); frame.pack(); frame.setLocationRelativeTo(editor); Toolkit.setIcon(frame); frame.setVisible(true); new SwingWorker<Void, Void>() { @Override protected Void doInBackground() throws Exception { addPropertyChangeListener(evt -> { if ("progress".equals(evt.getPropertyName())) { progressBar.setValue((Integer) evt.getNewValue()); } }); long totalSize = 0; for (File copyable : copyItems) { totalSize += Util.calcSize(copyable); } long progress = 0; setProgress(0); for (File copyable : copyItems) { if (copyable.isDirectory()) { copyDir(copyable, new File(newFolder, copyable.getName()), progress, totalSize); progress += Util.calcSize(copyable); } else { copyFile(copyable, new File(newFolder, copyable.getName()), progress, totalSize); if (Util.calcSize(copyable) < 512 * 1024) { // If the file length > 0.5MB, the copyFile() function has // been redesigned to change progress every 0.5MB so that // the progress bar doesn't stagnate during that time progress += Util.calcSize(copyable); setProgress((int) (progress * 100L / totalSize)); } } } saving.set(false); return null; } /** * Overloaded 
copyFile that is called whenever a Save As is being done, * so that the ProgressBar is updated for very large files as well. */ void copyFile(File sourceFile, File targetFile, long progress, long totalSize) throws IOException { BufferedInputStream from = new BufferedInputStream(new FileInputStream(sourceFile)); BufferedOutputStream to = new BufferedOutputStream(new FileOutputStream(targetFile)); byte[] buffer = new byte[16 * 1024]; int bytesRead; int progRead = 0; while ((bytesRead = from.read(buffer)) != -1) { to.write(buffer, 0, bytesRead); progRead += bytesRead; if (progRead >= 512 * 1024) { // to update progress bar every 0.5MB progress += progRead; //progressBar.setValue((int) Math.min(Math.ceil(progress * 100.0 / totalSize), 100)); setProgress((int) (100L * progress / totalSize)); progRead = 0; } } // Final update to progress bar setProgress((int) (100L * progress / totalSize)); from.close(); to.flush(); to.close(); if (!targetFile.setLastModified(sourceFile.lastModified())) { System.err.println("Warning: Could not set modification date/time for " + targetFile); } if (!targetFile.setExecutable(sourceFile.canExecute())) { if (!Platform.isWindows()) { // more of a UNIX thing System.err.println("Warning: Could not set permissions for " + targetFile); } } } long copyDir(File sourceDir, File targetDir, long progress, long totalSize) throws IOException { // Overloaded copyDir so that the Save As progress bar gets updated when the // files are in folders as well (like in the data folder) if (sourceDir.equals(targetDir)) { final String urDum = "source and target directories are identical"; throw new IllegalArgumentException(urDum); } targetDir.mkdirs(); String[] files = sourceDir.list(); if (files != null) { for (String filename : files) { // Ignore dot files (.DS_Store), dot folders (.svn) while copying if (filename.charAt(0) == '.') { continue; } File source = new File(sourceDir, filename); File target = new File(targetDir, filename); if (source.isDirectory()) { 
progress = copyDir(source, target, progress, totalSize); //progressBar.setValue((int) Math.min(Math.ceil(progress * 100.0 / totalSize), 100)); setProgress((int) (100L * progress / totalSize)); target.setLastModified(source.lastModified()); } else { copyFile(source, target, progress, totalSize); progress += source.length(); //progressBar.setValue((int) Math.min(Math.ceil(progress * 100.0 / totalSize), 100)); setProgress((int) (100L * progress / totalSize)); } } } else { throw new IOException("Could not list files inside " + sourceDir); } return progress; } @Override public void done() { frame.dispose(); editor.statusNotice(Language.text("editor.status.saving.done")); } }.execute(); }); } /** * Update internal state for new sketch name or folder location. */ protected void updateInternal(File sketchFolder) { // reset all the state information for the sketch object mainFile = code[0].getFile(); name = sketchFolder.getName(); folder = sketchFolder; disappearedWarning = false; codeFolder = new File(folder, "code"); dataFolder = new File(folder, "data"); updateNameProperties(); // Name changed, rebuild the sketch menus calcModified(); editor.updateTitle(); editor.getBase().rebuildSketchbook(); } protected void updateModeProperties(Mode mode, Mode defaultMode) { updateModeProperties(folder, mode, defaultMode); } /** * Create or modify a sketch.properties file to specify the given Mode. */ static protected void updateModeProperties(File folder, Mode mode, Mode defaultMode) { try { // Read the old sketch.properties file if it already exists Settings props = loadProperties(folder); // If changing to the default Mode, // remove those entries from sketch.properties if (mode == defaultMode) { props.remove("mode"); props.remove("mode.id"); } else { // Setting to something other than the default Mode, // write that and any other params already in the file. 
props.set("mode", mode.getTitle()); props.set("mode.id", mode.getIdentifier()); } props.reckon(); } catch (IOException e) { System.err.println("Error while writing sketch.properties"); e.printStackTrace(); } } /* protected Settings loadProperties() throws IOException { return loadProperties(folder); } */ /** * Opens and parses sketch.properties. If it does not exist, returns an * empty Settings object that can be written back to the same location. */ static protected Settings loadProperties(File folder) throws IOException { /* File propsFile = new File(folder, "sketch.properties"); if (propsFile.exists()) { return new Settings(propsFile); } return null; */ return new Settings(new File(folder, "sketch.properties")); } /** * Check through the various modes and see if this is a legit sketch. * Because the default mode will be the first in the list, this will always * prefer that one over the others. */ static protected File findMain(File folder, List<Mode> modeList) { try { Settings props = Sketch.loadProperties(folder); String main = props.get("main"); if (main != null) { File mainFile = new File(folder, main); if (!mainFile.exists()) { System.err.println(main + " does not exist inside " + folder); // Fall through to the code below in case we can recover. // Not removing the bad entry since this is a find() method. } } } catch (IOException e) { e.printStackTrace(); } for (Mode mode : modeList) { // Test whether a .pde file of the same name as its parent folder exists. String defaultName = folder.getName() + "." + mode.getDefaultExtension(); File entry = new File(folder, defaultName); if (entry.exists()) { return entry; } } return null; } private void updateNameProperties() { // If the main file and the sketch name are not identical, // update sketch.properties. String mainName = mainFile.getName(); String defaultName = name + "." 
+ mode.getDefaultExtension(); //System.out.println("main name is " + mainName + " and default name is " + defaultName); try { // Read the old sketch.properties file if it already exists Settings props = loadProperties(folder); if (mainName.equals(defaultName)) { props.remove("main"); } else { props.set("main", mainName); } //System.out.println("props size is now " + props.getMap().size()); props.reckon(); } catch (IOException e) { System.err.println("Error while updating sketch.properties"); e.printStackTrace(); } } /** * Prompt the user for a new file to the sketch, then call the * other addFile() function to actually add it. */ public void handleAddFile() { // make sure the user didn't hide the sketch folder ensureExistence(); // if read-only, give an error if (isReadOnly()) { // if the files are read-only, need to first do a "save as". Messages.showMessage(Language.text("add_file.messages.is_read_only"), Language.text("add_file.messages.is_read_only.description")); return; } // get a dialog, select a file to add to the sketch String prompt = Language.text("file"); //FileDialog fd = new FileDialog(new Frame(), prompt, FileDialog.LOAD); FileDialog fd = new FileDialog(editor, prompt, FileDialog.LOAD); fd.setVisible(true); String directory = fd.getDirectory(); String filename = fd.getFile(); if (filename == null) return; // copy the file into the folder. if people would rather // move instead of copy, they can do it by hand File sourceFile = new File(directory, filename); // now do the work of adding the file boolean result = addFile(sourceFile); if (result) { // editor.statusNotice("One file added to the sketch."); //Done from within TaskAddFile inner
and
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FilterExpression.java
{ "start": 1229, "end": 3526 }
class ____ implements Expression, Configurable { protected Expression expression; protected FilterExpression(Expression expression) { this.expression = expression; } @Override public void setOptions(FindOptions options) throws IOException { if (expression != null) { expression.setOptions(options); } } @Override public void prepare() throws IOException { if (expression != null) { expression.prepare(); } } @Override public Result apply(PathData item, int depth) throws IOException { if (expression != null) { return expression.apply(item, -1); } return Result.PASS; } @Override public void finish() throws IOException { if (expression != null) { expression.finish(); } } @Override public String[] getUsage() { if (expression != null) { return expression.getUsage(); } return null; } @Override public String[] getHelp() { if (expression != null) { return expression.getHelp(); } return null; } @Override public boolean isAction() { if (expression != null) { return expression.isAction(); } return false; } @Override public boolean isOperator() { if (expression != null) { return expression.isOperator(); } return false; } @Override public int getPrecedence() { if (expression != null) { return expression.getPrecedence(); } return -1; } @Override public void addChildren(Deque<Expression> expressions) { if (expression != null) { expression.addChildren(expressions); } } @Override public void addArguments(Deque<String> args) { if (expression != null) { expression.addArguments(args); } } @Override public void setConf(Configuration conf) { if (expression instanceof Configurable) { ((Configurable) expression).setConf(conf); } } @Override public Configuration getConf() { if (expression instanceof Configurable) { return ((Configurable) expression).getConf(); } return null; } @Override public String toString() { if (expression != null) { return getClass().getSimpleName() + "-" + expression.toString(); } return getClass().getSimpleName(); } }
FilterExpression
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KeycloakEndpointBuilderFactory.java
{ "start": 86883, "end": 88852 }
interface ____ { /** * Keycloak (camel-keycloak) * Manage Keycloak instances via Admin API. * * Category: security,management * Since: 4.15 * Maven coordinates: org.apache.camel:camel-keycloak * * @return the dsl builder for the headers' name. */ default KeycloakHeaderNameBuilder keycloak() { return KeycloakHeaderNameBuilder.INSTANCE; } /** * Keycloak (camel-keycloak) * Manage Keycloak instances via Admin API. * * Category: security,management * Since: 4.15 * Maven coordinates: org.apache.camel:camel-keycloak * * Syntax: <code>keycloak:label</code> * * Path parameter: label (required) * Logical name * * @param path label * @return the dsl builder */ default KeycloakEndpointBuilder keycloak(String path) { return KeycloakEndpointBuilderFactory.endpointBuilder("keycloak", path); } /** * Keycloak (camel-keycloak) * Manage Keycloak instances via Admin API. * * Category: security,management * Since: 4.15 * Maven coordinates: org.apache.camel:camel-keycloak * * Syntax: <code>keycloak:label</code> * * Path parameter: label (required) * Logical name * * @param componentName to use a custom component name for the endpoint * instead of the default name * @param path label * @return the dsl builder */ default KeycloakEndpointBuilder keycloak(String componentName, String path) { return KeycloakEndpointBuilderFactory.endpointBuilder(componentName, path); } } /** * The builder of headers' name for the Keycloak component. */ public static
KeycloakBuilders
java
ReactiveX__RxJava
src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableDeferTest.java
{ "start": 1005, "end": 2896 }
class ____ extends RxJavaTest { @Test public void defer() throws Throwable { Supplier<Observable<String>> factory = mock(Supplier.class); Observable<String> firstObservable = Observable.just("one", "two"); Observable<String> secondObservable = Observable.just("three", "four"); when(factory.get()).thenReturn(firstObservable, secondObservable); Observable<String> deferred = Observable.defer(factory); verifyNoInteractions(factory); Observer<String> firstObserver = TestHelper.mockObserver(); deferred.subscribe(firstObserver); verify(factory, times(1)).get(); verify(firstObserver, times(1)).onNext("one"); verify(firstObserver, times(1)).onNext("two"); verify(firstObserver, times(0)).onNext("three"); verify(firstObserver, times(0)).onNext("four"); verify(firstObserver, times(1)).onComplete(); Observer<String> secondObserver = TestHelper.mockObserver(); deferred.subscribe(secondObserver); verify(factory, times(2)).get(); verify(secondObserver, times(0)).onNext("one"); verify(secondObserver, times(0)).onNext("two"); verify(secondObserver, times(1)).onNext("three"); verify(secondObserver, times(1)).onNext("four"); verify(secondObserver, times(1)).onComplete(); } @Test public void deferFunctionThrows() throws Throwable { Supplier<Observable<String>> factory = mock(Supplier.class); when(factory.get()).thenThrow(new TestException()); Observable<String> result = Observable.defer(factory); Observer<String> o = TestHelper.mockObserver(); result.subscribe(o); verify(o).onError(any(TestException.class)); verify(o, never()).onNext(any(String.class)); verify(o, never()).onComplete(); } }
ObservableDeferTest
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/ConvertedAttributesTypecheckTest.java
{ "start": 1375, "end": 4418 }
class ____ { private static final Date TEST_DATE = new GregorianCalendar( 1996, Calendar.MAY, 20, 6, 30 ).getTime(); @BeforeAll public void setUp(SessionFactoryScope scope) { scope.inTransaction( session -> session.persist( new TestEntity( Set.of( "one", "two" ), "123", "3", TEST_DATE ) ) ); } @AfterAll public void tearDown(SessionFactoryScope scope) { scope.inTransaction( session -> session.createMutationQuery( "delete from TestEntity" ).executeUpdate() ); } @Test public void testLikeOnConvertedString(SessionFactoryScope scope) { scope.inTransaction( session -> { final TestEntity result = session.createQuery( "from TestEntity where convertedString like 'one%'", TestEntity.class ).getSingleResult(); assertThat( result.getConvertedString() ).contains( "one" ); } ); } @Test public void testBinaryArithmeticOnConvertedNumber(SessionFactoryScope scope) { scope.inTransaction( session -> { assertThat( session.createQuery( "select convertedNumber - 123 from TestEntity", Integer.class ).getSingleResult() ).isEqualTo( 0 ); assertThat( session.createQuery( "select 123 + convertedNumber from TestEntity", Integer.class ).getSingleResult() ).isEqualTo( 246 ); } ); } @Test public void testUnaryExpressionOnConvertedNumber(SessionFactoryScope scope) { scope.inTransaction( session -> { assertThat( session.createQuery( "select -convertedNumber from TestEntity", Integer.class ).getSingleResult() ).isEqualTo( -123 ); assertThat( session.createQuery( "from TestEntity where -convertedNumber = -123", TestEntity.class ).getSingleResult().getConvertedNumber() ).isEqualTo( "123" ); } ); } @Test public void testFromDurationExpressionOnConvertedDuration(SessionFactoryScope scope) { scope.inTransaction( session -> { assertThat( session.createQuery( "select convertedDuration by day from TestEntity", Long.class ).getSingleResult() ).isEqualTo( 3L ); assertThat( session.createQuery( "from TestEntity where convertedDuration by day = 3", TestEntity.class ).getSingleResult().getConvertedDuration() 
).isEqualTo( "3" ); } ); } @Test @Jira( "https://hibernate.atlassian.net/browse/HHH-18400" ) public void test(SessionFactoryScope scope) { scope.inTransaction( session -> { final CriteriaBuilder criteriaBuilder = session.getCriteriaBuilder(); final CriteriaQuery<TestEntity> criteriaQuery = criteriaBuilder.createQuery( TestEntity.class ); final Root<TestEntity> root = criteriaQuery.from( TestEntity.class ); final ParameterExpression<Date> dateParameter = criteriaBuilder.parameter( Date.class ); final TestEntity entity = session .createQuery( criteriaQuery.where( criteriaBuilder.equal( root.get( "convertedDate" ), dateParameter ) ) ).setParameter( dateParameter, TEST_DATE ).getSingleResult(); assertThat( entity ).isNotNull(); } ); } @Entity( name = "TestEntity" ) public static
ConvertedAttributesTypecheckTest
java
hibernate__hibernate-orm
hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/MappingFeature.java
{ "start": 650, "end": 1061 }
enum ____ { CONVERTER, ENUMERATED, DYNAMIC_MODEL, DISCRIMINATOR_INHERIT, JOINED_INHERIT, UNION_INHERIT, SECONDARY_TABLE, AGG_COMP_ID, NON_AGG_COMP_ID, ID_CLASS, EMBEDDABLE, MANY_ONE, ONE_ONE, ONE_MANY, MANY_MANY, ANY, MANY_ANY, COLLECTION_TABLE, JOIN_TABLE, JOIN_COLUMN, ; public static EnumSet<MappingFeature> all() { return EnumSet.allOf( MappingFeature.class ); } }
MappingFeature
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
{ "start": 2701, "end": 22898 }
class ____ { public static final Logger LOG = LoggerFactory.getLogger(TestLeaseRecovery2.class); { GenericTestUtils.setLogLevel(DataNode.LOG, Level.TRACE); GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.TRACE); GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE); } static final private long BLOCK_SIZE = 1024; static final private int FILE_SIZE = (int)BLOCK_SIZE*2; static final short REPLICATION_NUM = (short)3; static final byte[] buffer = new byte[FILE_SIZE]; static private final String fakeUsername = "fakeUser1"; static private final String fakeGroup = "supergroup"; static private MiniDFSCluster cluster; static private DistributedFileSystem dfs; final static private Configuration conf = new HdfsConfiguration(); final static private int BUF_SIZE = conf.getInt( CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); final static private long SHORT_LEASE_PERIOD = 1000L; final static private long LONG_LEASE_PERIOD = 60*60*SHORT_LEASE_PERIOD; /** start a dfs cluster * * @throws IOException */ @BeforeEach public void startUp() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(5) .checkExitOnShutdown(false) .build(); cluster.waitActive(); dfs = cluster.getFileSystem(); } /** * stop the cluster * @throws IOException */ @AfterEach public void tearDown() throws IOException { if (cluster != null) { IOUtils.closeStream(dfs); cluster.shutdown(); } } /** * Test the NameNode's revoke lease on current lease holder function. * @throws Exception */ @Test public void testImmediateRecoveryOfLease() throws Exception { //create a file // write bytes into the file. 
byte [] actual = new byte[FILE_SIZE]; int size = AppendTestUtil.nextInt(FILE_SIZE); Path filepath = createFile("/immediateRecoverLease-shortlease", size, true); // set the soft limit to be 1 second so that the // namenode triggers lease recovery on next attempt to write-for-open. cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD); recoverLeaseUsingCreate(filepath); verifyFile(dfs, filepath, actual, size); //test recoverLease // set the soft limit to be 1 hour but recoverLease should // close the file immediately cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD); size = AppendTestUtil.nextInt(FILE_SIZE); filepath = createFile("/immediateRecoverLease-longlease", size, false); // test recoverLease from a different client recoverLease(filepath, null); verifyFile(dfs, filepath, actual, size); // test recoverlease from the same client size = AppendTestUtil.nextInt(FILE_SIZE); filepath = createFile("/immediateRecoverLease-sameclient", size, false); // create another file using the same client Path filepath1 = new Path(filepath.toString() + AppendTestUtil.nextInt()); FSDataOutputStream stm = dfs.create(filepath1, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); // recover the first file recoverLease(filepath, dfs); verifyFile(dfs, filepath, actual, size); // continue to write to the second file stm.write(buffer, 0, size); stm.close(); verifyFile(dfs, filepath1, actual, size); } @Test public void testCloseWhileRecoverLease() throws Exception { // test recoverLease // set the soft limit to be 1 hour but recoverLease should // close the file immediately cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD); int size = AppendTestUtil.nextInt((int) BLOCK_SIZE); String filestr = "/testCloseWhileRecoverLease"; AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath = new Path(filestr); FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); // hflush file 
AppendTestUtil.LOG.info("hflush"); stm.hflush(); // Pause DN block report. // Let client recover lease, and then close the file, and then let DN // report blocks. ArrayList<DataNode> dataNodes = cluster.getDataNodes(); for (DataNode dn: dataNodes) { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false); } LOG.info("pause IBR"); for (DataNode dn: dataNodes) { DataNodeTestUtils.pauseIBR(dn); } AppendTestUtil.LOG.info("size=" + size); stm.write(buffer, 0, size); // hflush file AppendTestUtil.LOG.info("hflush"); stm.hflush(); LOG.info("recover lease"); dfs.recoverLease(filepath); try { stm.close(); fail("close() should fail because the file is under recovery."); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( "whereas it is under recovery", ioe); } for (DataNode dn: dataNodes) { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false); } LOG.info("trigger heartbeats"); // resume DN block report for (DataNode dn: dataNodes) { DataNodeTestUtils.triggerHeartbeat(dn); } stm.close(); assertEquals(cluster.getNamesystem().getBlockManager().getMissingBlocksCount(), 0); } @Test public void testLeaseRecoverByAnotherUser() throws Exception { byte [] actual = new byte[FILE_SIZE]; cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD); Path filepath = createFile("/immediateRecoverLease-x", 0, true); recoverLeaseUsingCreate2(filepath); verifyFile(dfs, filepath, actual, 0); } private Path createFile(final String filestr, final int size, final boolean triggerLeaseRenewerInterrupt) throws IOException, InterruptedException { AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath = new Path(filestr); FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer, 0, size); // hflush file AppendTestUtil.LOG.info("hflush"); stm.hflush(); if (triggerLeaseRenewerInterrupt) { 
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); } return filepath; } private void recoverLease(Path filepath, DistributedFileSystem dfs) throws Exception { if (dfs == null) { dfs = (DistributedFileSystem)getFSAsAnotherUser(conf); } while (!dfs.recoverLease(filepath)) { AppendTestUtil.LOG.info("sleep " + 5000 + "ms"); Thread.sleep(5000); } } private FileSystem getFSAsAnotherUser(final Configuration c) throws IOException, InterruptedException { return FileSystem.get(FileSystem.getDefaultUri(c), c, UserGroupInformation.createUserForTesting(fakeUsername, new String [] {fakeGroup}).getUserName()); } private void recoverLeaseUsingCreate(Path filepath) throws IOException, InterruptedException { FileSystem dfs2 = getFSAsAnotherUser(conf); for(int i = 0; i < 10; i++) { AppendTestUtil.LOG.info("i=" + i); try { dfs2.create(filepath, false, BUF_SIZE, (short)1, BLOCK_SIZE); fail("Creation of an existing file should never succeed."); } catch(FileAlreadyExistsException e) { return; // expected } catch(AlreadyBeingCreatedException e) { return; // expected } catch(IOException ioe) { AppendTestUtil.LOG.warn("UNEXPECTED ", ioe); AppendTestUtil.LOG.info("sleep " + 5000 + "ms"); try {Thread.sleep(5000);} catch (InterruptedException e) {} } } fail("recoverLeaseUsingCreate failed"); } private void recoverLeaseUsingCreate2(Path filepath) throws Exception { FileSystem dfs2 = getFSAsAnotherUser(conf); int size = AppendTestUtil.nextInt(FILE_SIZE); DistributedFileSystem dfsx = (DistributedFileSystem) dfs2; //create file using dfsx Path filepath2 = new Path("/immediateRecoverLease-x2"); FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2")); try {Thread.sleep(10000);} catch (InterruptedException e) {} dfsx.append(filepath); } private void verifyFile(FileSystem dfs, Path filepath, byte[] actual, int size) throws IOException { 
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."); // verify that file-size matches assertTrue(dfs.getFileStatus(filepath).getLen() == size, "File should be " + size + " bytes, but is actually " + " found to be " + dfs.getFileStatus(filepath).getLen() + " bytes"); // verify that there is enough data to read. System.out.println("File size is good. Now validating sizes from datanodes..."); FSDataInputStream stmin = dfs.open(filepath); stmin.readFully(0, actual, 0, size); stmin.close(); } /** * This test makes the client does not renew its lease and also * set the hard lease expiration period to be short 1s. Thus triggering * lease expiration to happen while the client is still alive. * * The test makes sure that the lease recovery completes and the client * fails if it continues to write to the file. * * @throws Exception */ @Test public void testHardLeaseRecovery() throws Exception { //create a file String filestr = "/hardLeaseRecovery"; AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath = new Path(filestr); FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); // write bytes into the file. 
int size = AppendTestUtil.nextInt(FILE_SIZE); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer, 0, size); // hflush file AppendTestUtil.LOG.info("hflush"); stm.hflush(); // kill the lease renewal thread AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); // set the hard limit to be 1 second cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD); // wait for lease recovery to complete LocatedBlocks locatedBlocks; do { Thread.sleep(SHORT_LEASE_PERIOD); locatedBlocks = dfs.dfs.getLocatedBlocks(filestr, 0L, size); } while (locatedBlocks.isUnderConstruction()); assertEquals(size, locatedBlocks.getFileLength()); // make sure that the writer thread gets killed try { stm.write('b'); stm.close(); fail("Writer thread should have been killed"); } catch (IOException e) { e.printStackTrace(); } // verify data AppendTestUtil.LOG.info( "File size is good. Now validating sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr); } /** * This test makes the client does not renew its lease and also * set the soft lease expiration period to be short 1s. Thus triggering * soft lease expiration to happen immediately by having another client * trying to create the same file. * * The test makes sure that the lease recovery completes. 
* * @throws Exception */ @Test public void testSoftLeaseRecovery() throws Exception { Map<String, String []> u2g_map = new HashMap<String, String []>(1); u2g_map.put(fakeUsername, new String[] {fakeGroup}); DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map); long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY, DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000; // Reset default lease periods cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, hardlimit); //create a file // create a random file name String filestr = "/foo" + AppendTestUtil.nextInt(); AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath = new Path(filestr); FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); // write random number of bytes into it. int size = AppendTestUtil.nextInt(FILE_SIZE); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer, 0, size); // hflush file AppendTestUtil.LOG.info("hflush"); stm.hflush(); AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); // set the soft limit to be 1 second so that the // namenode triggers lease recovery on next attempt to write-for-open. cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD); // try to re-open the file before closing the previous handle. This // should fail but will trigger lease recovery. { UserGroupInformation ugi = UserGroupInformation.createUserForTesting(fakeUsername, new String [] { fakeGroup}); FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf); boolean done = false; for(int i = 0; i < 10 && !done; i++) { AppendTestUtil.LOG.info("i=" + i); try { dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); fail("Creation of an existing file should never succeed."); } catch (FileAlreadyExistsException ex) { done = true; } catch (AlreadyBeingCreatedException ex) { AppendTestUtil.LOG.info("GOOD! 
got " + ex.getMessage()); } catch (IOException ioe) { AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe); } if (!done) { AppendTestUtil.LOG.info("sleep " + 5000 + "ms"); try {Thread.sleep(5000);} catch (InterruptedException e) {} } } assertTrue(done); } AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."); // verify that file-size matches long fileSize = dfs.getFileStatus(filepath).getLen(); assertTrue(fileSize == size, "File should be " + size + " bytes, but is actually " + " found to be " + fileSize + " bytes"); // verify data AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr); } /** * This test makes it so the client does not renew its lease and also * set the hard lease expiration period to be short, thus triggering * lease expiration to happen while the client is still alive. The test * also causes the NN to restart after lease recovery has begun, but before * the DNs have completed the blocks. This test verifies that when the NN * comes back up, the client no longer holds the lease. * * The test makes sure that the lease recovery completes and the client * fails if it continues to write to the file, even after NN restart. 
* * @throws Exception */ @Test @Timeout(value = 60) public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception { hardLeaseRecoveryRestartHelper(false, -1); } @Test @Timeout(value = 60) public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception { hardLeaseRecoveryRestartHelper(false, 1535); } @Test @Timeout(value = 60) public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart() throws Exception { hardLeaseRecoveryRestartHelper(true, -1); } public void hardLeaseRecoveryRestartHelper(boolean doRename, int size) throws Exception { if (size < 0) { size = AppendTestUtil.nextInt(FILE_SIZE + 1); } //create a file String fileStr = "/hardLeaseRecovery"; AppendTestUtil.LOG.info("filestr=" + fileStr); Path filePath = new Path(fileStr); FSDataOutputStream stm = dfs.create(filePath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfs.dfs.exists(fileStr)); // write bytes into the file. AppendTestUtil.LOG.info("size=" + size); stm.write(buffer, 0, size); String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath( cluster.getNameNode(), fileStr); assertFalse(originalLeaseHolder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER), "original lease holder should not be the NN"); // hflush file AppendTestUtil.LOG.info("hflush"); stm.hflush(); // check visible length final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath); assertEquals(size, in.getVisibleLength()); in.close(); if (doRename) { fileStr += ".renamed"; Path renamedPath = new Path(fileStr); assertTrue(dfs.rename(filePath, renamedPath)); filePath = renamedPath; } // kill the lease renewal thread AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); // Make sure the DNs don't send a heartbeat for a while, so the blocks // won't actually get completed during lease recovery. 
for (DataNode dn : cluster.getDataNodes()) { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); } // set the hard limit to be 1 second cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD); // Normally, the in-progress edit log would be finalized by // FSEditLog#endCurrentLogSegment. For testing purposes, we // disable that here. FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog()); doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean()); DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog); // Make sure lease recovery begins. final String path = fileStr; GenericTestUtils.waitFor(new Supplier<Boolean>() { @Override public Boolean get() { String holder = NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), path); return holder!=null && holder .startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER); } }, (int)SHORT_LEASE_PERIOD, (int)SHORT_LEASE_PERIOD * 20); cluster.restartNameNode(false); checkLease(fileStr, size); // Let the DNs send heartbeats again. for (DataNode dn : cluster.getDataNodes()) { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false); } cluster.waitActive(); // set the hard limit to be 1 second, to initiate lease recovery. cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD); // wait for lease recovery to complete LocatedBlocks locatedBlocks; do { Thread.sleep(SHORT_LEASE_PERIOD); locatedBlocks = dfs.dfs.getLocatedBlocks(fileStr, 0L, size); } while (locatedBlocks.isUnderConstruction()); assertEquals(size, locatedBlocks.getFileLength()); // make sure that the client can't write data anymore. 
try { stm.write('b'); stm.hflush(); fail("Should not be able to flush after we've lost the lease"); } catch (IOException e) { LOG.info("Expceted exception on write/hflush", e); } try { stm.close(); fail("Should not be able to close after we've lost the lease"); } catch (IOException e) { LOG.info("Expected exception on close", e); } // verify data AppendTestUtil.LOG.info( "File size is good. Now validating sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr); } static void checkLease(String f, int size) { final String holder = NameNodeAdapter.getLeaseHolderForPath( cluster.getNameNode(), f); if (size == 0) { assertEquals(null, holder, "lease holder should null, file is closed"); } else { assertTrue(holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER), "lease holder should now be the NN"); } } }
TestLeaseRecovery2
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java
{ "start": 1354, "end": 4536 }
class ____ extends ESSingleNodeTestCase { @Override protected Collection<Class<? extends Plugin>> getPlugins() { return Arrays.asList(TestTaskPlugin.class); } /** * Lower the queue sizes to be small enough that both bulk and searches will time out and have to be retried. */ @Override protected Settings nodeSettings() { return Settings.builder().put(super.nodeSettings()).put("thread_pool.write.size", 2).put("thread_pool.write.queue_size", 0).build(); } public void testRetry() throws Exception { logger.info("block the write executor"); CyclicBarrier barrier = new CyclicBarrier(2); getInstanceFromNode(ThreadPool.class).executor(ThreadPool.Names.WRITE).execute(() -> { try { barrier.await(); logger.info("blocking the write executor"); barrier.await(); logger.info("unblocked the write executor"); } catch (Exception e) { throw new RuntimeException(e); } }); barrier.await(); Task task; PlainActionFuture<TestTaskPlugin.NodesResponse> future = new PlainActionFuture<>(); try { logger.info("start a task that will store its results"); TestTaskPlugin.NodesRequest req = new TestTaskPlugin.NodesRequest("foo"); req.setShouldStoreResult(true); req.setShouldBlock(false); task = nodeClient().executeLocally(TestTaskPlugin.TEST_TASK_ACTION, req, future); logger.info("verify that the task has started and is still running"); assertBusy(() -> { GetTaskResponse runningTask = clusterAdmin().prepareGetTask(new TaskId(nodeClient().getLocalNodeId(), task.getId())).get(); assertNotNull(runningTask.getTask()); assertFalse(runningTask.getTask().isCompleted()); assertEquals(emptyMap(), runningTask.getTask().getErrorAsMap()); assertEquals(emptyMap(), runningTask.getTask().getResponseAsMap()); assertFalse(future.isDone()); }); } finally { logger.info("unblock the write executor"); barrier.await(); } logger.info("wait for the task to finish"); future.get(10, TimeUnit.SECONDS); logger.info("check that it was written successfully"); GetTaskResponse finishedTask = clusterAdmin().prepareGetTask(new 
TaskId(nodeClient().getLocalNodeId(), task.getId())).get(); assertTrue(finishedTask.getTask().isCompleted()); assertEquals(emptyMap(), finishedTask.getTask().getErrorAsMap()); assertEquals(singletonMap("failure_count", 0), finishedTask.getTask().getResponseAsMap()); } /** * Get the {@linkplain NodeClient} local to the node being tested. */ private NodeClient nodeClient() { /* * Luckilly our test infrastructure already returns it, but we can't * change the return type in the superclass because it is wrapped other * places. */ return (NodeClient) client(); } }
TaskStorageRetryIT
java
apache__camel
components/camel-aws/camel-aws2-cw/src/main/java/org/apache/camel/component/aws2/cw/Cw2Configuration.java
{ "start": 1206, "end": 9257 }
class ____ implements Cloneable { @UriPath @Metadata(required = true) private String namespace; @UriParam @Metadata(label = "advanced", autowired = true) private CloudWatchClient amazonCwClient; @UriParam(label = "security", secret = true) private String accessKey; @UriParam(label = "security", secret = true) private String secretKey; @UriParam(label = "security", secret = true) private String sessionToken; @UriParam private String name; @UriParam private Double value; @UriParam private String unit; @UriParam private Instant timestamp; @UriParam(label = "proxy", enums = "HTTP,HTTPS", defaultValue = "HTTPS") private Protocol proxyProtocol = Protocol.HTTPS; @UriParam(label = "proxy") private String proxyHost; @UriParam(label = "proxy") private Integer proxyPort; @UriParam(enums = "ap-south-2,ap-south-1,eu-south-1,eu-south-2,us-gov-east-1,me-central-1,il-central-1,ca-central-1,eu-central-1,us-iso-west-1,eu-central-2,eu-isoe-west-1,us-west-1,us-west-2,af-south-1,eu-north-1,eu-west-3,eu-west-2,eu-west-1,ap-northeast-3,ap-northeast-2,ap-northeast-1,me-south-1,sa-east-1,ap-east-1,cn-north-1,ca-west-1,us-gov-west-1,ap-southeast-1,ap-southeast-2,us-iso-east-1,ap-southeast-3,ap-southeast-4,us-east-1,us-east-2,cn-northwest-1,us-isob-east-1,aws-global,aws-cn-global,aws-us-gov-global,aws-iso-global,aws-iso-b-global") private String region; @UriParam(label = "security") private boolean trustAllCertificates; @UriParam(defaultValue = "false") private boolean overrideEndpoint; @UriParam private String uriEndpointOverride; @UriParam(label = "security") private boolean useDefaultCredentialsProvider; @UriParam(label = "security") private boolean useProfileCredentialsProvider; @UriParam(label = "security") private boolean useSessionCredentials; @UriParam(label = "security") private String profileCredentialsName; public String getAccessKey() { return accessKey; } /** * Amazon AWS Access Key */ public void setAccessKey(String accessKey) { this.accessKey = accessKey; } public String 
getSecretKey() { return secretKey; } /** * Amazon AWS Secret Key */ public void setSecretKey(String secretKey) { this.secretKey = secretKey; } public String getSessionToken() { return sessionToken; } /** * Amazon AWS Session Token used when the user needs to assume an IAM role */ public void setSessionToken(String sessionToken) { this.sessionToken = sessionToken; } public String getName() { return name; } /** * The metric name */ public void setName(String name) { this.name = name; } public Double getValue() { return value; } /** * The metric value */ public void setValue(Double value) { this.value = value; } public String getUnit() { return unit; } /** * The metric unit */ public void setUnit(String unit) { this.unit = unit; } public String getNamespace() { return namespace; } /** * The metric namespace */ public void setNamespace(String namespace) { this.namespace = namespace; } /** * The metric timestamp */ public void setTimestamp(Instant timestamp) { this.timestamp = timestamp; } public Instant getTimestamp() { return timestamp; } public CloudWatchClient getAmazonCwClient() { return amazonCwClient; } /** * To use the AmazonCloudWatch as the client */ public void setAmazonCwClient(CloudWatchClient amazonCwClient) { this.amazonCwClient = amazonCwClient; } public Protocol getProxyProtocol() { return proxyProtocol; } /** * To define a proxy protocol when instantiating the CW client */ public void setProxyProtocol(Protocol proxyProtocol) { this.proxyProtocol = proxyProtocol; } public String getProxyHost() { return proxyHost; } /** * To define a proxy host when instantiating the CW client */ public void setProxyHost(String proxyHost) { this.proxyHost = proxyHost; } public Integer getProxyPort() { return proxyPort; } /** * To define a proxy port when instantiating the CW client */ public void setProxyPort(Integer proxyPort) { this.proxyPort = proxyPort; } public String getRegion() { return region; } /** * The region in which CW client needs to work. 
When using this parameter, the configuration will expect the * lowercase name of the region (for example, ap-east-1) You'll need to use the name Region.EU_WEST_1.id() */ public void setRegion(String region) { this.region = region; } public boolean isTrustAllCertificates() { return trustAllCertificates; } /** * If we want to trust all certificates in case of overriding the endpoint */ public void setTrustAllCertificates(boolean trustAllCertificates) { this.trustAllCertificates = trustAllCertificates; } public boolean isOverrideEndpoint() { return overrideEndpoint; } /** * Set the need for overriding the endpoint. This option needs to be used in combination with the * uriEndpointOverride option */ public void setOverrideEndpoint(boolean overrideEndpoint) { this.overrideEndpoint = overrideEndpoint; } public String getUriEndpointOverride() { return uriEndpointOverride; } /** * Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option */ public void setUriEndpointOverride(String uriEndpointOverride) { this.uriEndpointOverride = uriEndpointOverride; } /** * Set whether the S3 client should expect to load credentials through a default credentials provider or to expect * static credentials to be passed in. */ public void setUseDefaultCredentialsProvider(Boolean useDefaultCredentialsProvider) { this.useDefaultCredentialsProvider = useDefaultCredentialsProvider; } public Boolean isUseDefaultCredentialsProvider() { return useDefaultCredentialsProvider; } public boolean isUseProfileCredentialsProvider() { return useProfileCredentialsProvider; } /** * Set whether the Cloudwatch client should expect to load credentials through a profile credentials provider. 
*/ public void setUseProfileCredentialsProvider(boolean useProfileCredentialsProvider) { this.useProfileCredentialsProvider = useProfileCredentialsProvider; } public boolean isUseSessionCredentials() { return useSessionCredentials; } /** * Set whether the CloudWatch client should expect to use Session Credentials. This is useful in a situation in * which the user needs to assume an IAM role for doing operations in CloudWatch. */ public void setUseSessionCredentials(boolean useSessionCredentials) { this.useSessionCredentials = useSessionCredentials; } public String getProfileCredentialsName() { return profileCredentialsName; } /** * If using a profile credentials provider, this parameter will set the profile name */ public void setProfileCredentialsName(String profileCredentialsName) { this.profileCredentialsName = profileCredentialsName; } // ************************************************* // // ************************************************* public Cw2Configuration copy() { try { return (Cw2Configuration) super.clone(); } catch (CloneNotSupportedException e) { throw new RuntimeCamelException(e); } } }
Cw2Configuration
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/indices/EmptySystemIndices.java
{ "start": 750, "end": 945 }
class ____ extends SystemIndices { public static final SystemIndices INSTANCE = new EmptySystemIndices(); private EmptySystemIndices() { super(List.of()); } }
EmptySystemIndices
java
FasterXML__jackson-databind
src/main/java/tools/jackson/databind/introspect/AnnotatedMethodMap.java
{ "start": 119, "end": 300 }
class ____ to keep track of collection of * {@link AnnotatedMethod}s, accessible by lookup. Lookup * is usually needed for augmenting and overriding annotations. */ public final
used
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/mapping/PrimaryKey.java
{ "start": 417, "end": 2437 }
class ____ extends Constraint { private UniqueKey orderingUniqueKey = null; private int[] originalOrder; public PrimaryKey(Table table) { super( table ); } @Deprecated(since = "7") public PrimaryKey() { } @Override public void addColumn(Column column) { // force primary key columns to not-null for ( var next : getTable().getColumns() ) { if ( next.getCanonicalName().equals( column.getCanonicalName() ) ) { next.setNullable( false ); } } super.addColumn( column ); } @Override public String getExportIdentifier() { return qualify( getTable().getExportIdentifier(), "PK-" + getName() ); } public List<Column> getColumnsInOriginalOrder() { final var columns = getColumns(); if ( originalOrder == null ) { return columns; } final var columnsInOriginalOrder = new Column[columns.size()]; for ( int i = 0; i < columnsInOriginalOrder.length; i++ ) { columnsInOriginalOrder[originalOrder[i]] = columns.get( i ); } return asList( columnsInOriginalOrder ); } public void setOrderingUniqueKey(UniqueKey uniqueKey) { orderingUniqueKey = uniqueKey; } public UniqueKey getOrderingUniqueKey() { return orderingUniqueKey; } @Internal public void reorderColumns(List<Column> reorderedColumns) { final var columns = getColumns(); if ( originalOrder != null ) { assert columns.equals( reorderedColumns ); } else { assert columns.size() == reorderedColumns.size() && columns.containsAll( reorderedColumns ); originalOrder = new int[columns.size()]; final var orderingUniqueKey = getOrderingUniqueKey(); final var newColumns = orderingUniqueKey != null ? orderingUniqueKey.getColumns() : reorderedColumns; for ( int i = 0; i < newColumns.size(); i++ ) { final var reorderedColumn = newColumns.get( i ); originalOrder[i] = columns.indexOf( reorderedColumn ); } columns.clear(); columns.addAll( newColumns ); } } @Internal public int[] getOriginalOrder() { return originalOrder; } }
PrimaryKey
java
spring-projects__spring-framework
spring-core/src/test/java/org/springframework/core/io/ResourceTests.java
{ "start": 21576, "end": 23013 }
class ____ { @Test void missingResourceIsNotReadable() { final String name = "test-resource"; Resource resource = new AbstractResource() { @Override public String getDescription() { return name; } @Override public InputStream getInputStream() throws IOException { throw new FileNotFoundException(); } }; assertThatExceptionOfType(FileNotFoundException.class).isThrownBy(resource::getURL) .withMessageContaining(name); assertThatExceptionOfType(FileNotFoundException.class).isThrownBy(resource::getFile) .withMessageContaining(name); assertThatExceptionOfType(FileNotFoundException.class).isThrownBy(() -> resource.createRelative("/testing")).withMessageContaining(name); assertThatExceptionOfType(FileNotFoundException.class).isThrownBy(resource::getContentAsByteArray); assertThatExceptionOfType(FileNotFoundException.class).isThrownBy( () -> resource.getContentAsString(StandardCharsets.US_ASCII)); assertThat(resource.getFilename()).isNull(); } @Test void hasContentLength() throws Exception { AbstractResource resource = new AbstractResource() { @Override public InputStream getInputStream() { return new ByteArrayInputStream(new byte[] {'a', 'b', 'c'}); } @Override public String getDescription() { return ""; } }; assertThat(resource.contentLength()).isEqualTo(3L); } } }
AbstractResourceTests
java
elastic__elasticsearch
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlNodeFailureIT.java
{ "start": 1666, "end": 8028 }
class ____ extends AbstractEsqlIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { var plugins = new ArrayList<>(super.nodePlugins()); plugins.add(FailingFieldPlugin.class); plugins.add(InternalExchangePlugin.class); return plugins; } @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .build(); logger.info("settings {}", settings); return settings; } public Set<String> populateIndices() throws Exception { XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); mapping.startObject("runtime"); { mapping.startObject("fail_me"); { mapping.field("type", "long"); mapping.startObject("script").field("source", "").field("lang", "failing_field").endObject(); } mapping.endObject(); } mapping.endObject(); client().admin().indices().prepareCreate("fail").setMapping(mapping.endObject()).get(); int okCount = between(1, 50); Set<String> okIds = new HashSet<>(); List<IndexRequestBuilder> docs = new ArrayList<>(okCount); for (int d = 0; d < okCount; d++) { String id = "ok-" + d; okIds.add(id); docs.add(client().prepareIndex("ok").setId(id).setSource("foo", d)); } int failCount = between(1, 50); for (int d = 0; d < failCount; d++) { docs.add(client().prepareIndex("fail").setId("fail-" + d).setSource("foo", d)); } indexRandom(true, docs); return okIds; } /** * Use a runtime field that fails when loading field values to fail the entire query. 
*/ public void testFailureLoadingFields() throws Exception { populateIndices(); IllegalStateException e = expectThrows(IllegalStateException.class, () -> run("FROM fail,ok | LIMIT 100").close()); assertThat(e.getMessage(), equalTo("Accessing failing field")); } public void testPartialResults() throws Exception { Set<String> okIds = populateIndices(); EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM fail,ok METADATA _id | KEEP _id, fail_me | LIMIT 100"); request.allowPartialResults(true); // have to run one shard at a time to avoid failing all shards QueryPragmas pragma = new QueryPragmas( Settings.builder().put(randomPragmas().getSettings()).put(QueryPragmas.MAX_CONCURRENT_SHARDS_PER_NODE.getKey(), 1).build() ); request.pragmas(pragma); request.acceptedPragmaRisks(true); try (EsqlQueryResponse resp = run(request)) { assertTrue(resp.isPartial()); List<List<Object>> rows = EsqlTestUtils.getValuesList(resp); assertThat(rows.size(), equalTo(okIds.size())); Set<String> actualIds = new HashSet<>(); for (List<Object> row : rows) { assertThat(row.size(), equalTo(2)); String id = (String) row.getFirst(); assertThat(id, in(okIds)); assertTrue(actualIds.add(id)); } EsqlExecutionInfo.Cluster localInfo = resp.getExecutionInfo().getCluster(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY); assertThat(localInfo.getFailures(), not(empty())); assertThat(localInfo.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.PARTIAL)); assertThat(localInfo.getFailures().get(0).reason(), containsString("Accessing failing field")); } } public void testDefaultPartialResults() throws Exception { Set<String> okIds = populateIndices(); assertAcked( client().admin() .cluster() .prepareUpdateSettings(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS) .setPersistentSettings(Settings.builder().put(EsqlPlugin.QUERY_ALLOW_PARTIAL_RESULTS.getKey(), true)) ); try { // allow_partial_results = default { EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM fail,ok | 
LIMIT 100"); request.pragmas(randomPragmas()); // have to run one shard at a time to avoid failing all shards QueryPragmas pragma = new QueryPragmas( Settings.builder() .put(randomPragmas().getSettings()) .put(QueryPragmas.MAX_CONCURRENT_SHARDS_PER_NODE.getKey(), 1) .build() ); request.pragmas(pragma); request.acceptedPragmaRisks(true); if (randomBoolean()) { request.allowPartialResults(true); } try (EsqlQueryResponse resp = run(request)) { assertTrue(resp.isPartial()); List<List<Object>> rows = EsqlTestUtils.getValuesList(resp); assertThat(rows.size(), lessThanOrEqualTo(okIds.size())); assertThat(rows.size(), greaterThan(0)); } } // allow_partial_results = false { EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM fail,ok | LIMIT 100"); request.pragmas(randomPragmas()); request.allowPartialResults(false); IllegalStateException e = expectThrows(IllegalStateException.class, () -> run(request).close()); assertThat(e.getMessage(), equalTo("Accessing failing field")); } } finally { assertAcked( client().admin() .cluster() .prepareUpdateSettings(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS) .setPersistentSettings(Settings.builder().putNull(EsqlPlugin.QUERY_ALLOW_PARTIAL_RESULTS.getKey())) ); } } }
EsqlNodeFailureIT
java
apache__avro
lang/java/grpc/src/main/java/org/apache/avro/grpc/AvroRequestMarshaller.java
{ "start": 1453, "end": 2696 }
class ____ implements MethodDescriptor.Marshaller<Object[]> { private static final EncoderFactory ENCODER_FACTORY = new EncoderFactory(); private static final DecoderFactory DECODER_FACTORY = new DecoderFactory(); private final Protocol.Message message; public AvroRequestMarshaller(Protocol.Message message) { this.message = message; } @Override public InputStream stream(Object[] value) { return new AvroRequestInputStream(value, message); } @Override public Object[] parse(InputStream stream) { try { BinaryDecoder in = DECODER_FACTORY.binaryDecoder(stream, null); Schema reqSchema = message.getRequest(); GenericRecord request = (GenericRecord) new SpecificDatumReader<>(reqSchema).read(null, in); Object[] args = new Object[reqSchema.getFields().size()]; int i = 0; for (Schema.Field field : reqSchema.getFields()) { args[i++] = request.get(field.name()); } return args; } catch (IOException e) { throw Status.INTERNAL.withCause(e).withDescription("Error deserializing avro request arguments") .asRuntimeException(); } finally { AvroGrpcUtils.skipAndCloseQuietly(stream); } } private static
AvroRequestMarshaller
java
alibaba__nacos
persistence/src/main/java/com/alibaba/nacos/persistence/datasource/ExternalDataSourceServiceImpl.java
{ "start": 9902, "end": 11291 }
class ____ implements Runnable { @Override public void run() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("check db health."); } String sql = "SELECT * FROM config_info_gray WHERE id = 1"; for (int i = 0; i < testJtList.size(); i++) { JdbcTemplate jdbcTemplate = testJtList.get(i); try { try { jdbcTemplate.queryForMap(sql); } catch (EmptyResultDataAccessException e) { // do nothing. } isHealthList.set(i, Boolean.TRUE); } catch (DataAccessException e) { if (i == masterIndex) { LOGGER.error("[db-error] master db {} down.", InternetAddressUtil.getIpFromString(dataSourceList.get(i).getJdbcUrl())); } else { LOGGER.error("[db-error] slave db {} down.", InternetAddressUtil.getIpFromString(dataSourceList.get(i).getJdbcUrl())); } isHealthList.set(i, Boolean.FALSE); DatasourceMetrics.getDbException().increment(); } } } } }
CheckDbHealthTask
java
elastic__elasticsearch
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/CommandLicenseTests.java
{ "start": 2495, "end": 9768 }
class ____ { XPackLicenseState basicLicense = makeLicenseState(License.OperationMode.BASIC); XPackLicenseState platinumLicense = makeLicenseState(License.OperationMode.PLATINUM); XPackLicenseState enterpriseLicense = makeLicenseState(License.OperationMode.ENTERPRISE); private XPackLicenseState licenseLevel(LicenseAware licenseAware) { for (XPackLicenseState license : List.of(basicLicense, platinumLicense, enterpriseLicense)) { if (licenseAware.licenseCheck(license)) { return license; } } throw new IllegalArgumentException("No license level is supported by " + licenseAware.getClass().getName()); } } private static XPackLicenseState makeLicenseState(License.OperationMode mode) { return new XPackLicenseState(System::currentTimeMillis, new XPackLicenseStatus(mode, true, null)); } private static void checkLicense(String commandName, LogicalPlan command) throws Exception { log.info("Running function license checks"); TestCheckLicense checkLicense = new TestCheckLicense(); ObservabilityTier observabilityTier = null; SupportsObservabilityTier supportsObservabilityTier = command.getClass().getAnnotation(SupportsObservabilityTier.class); if (supportsObservabilityTier != null) { observabilityTier = supportsObservabilityTier.tier(); } if (command instanceof LicenseAware licenseAware) { log.info("Command " + commandName + " implements LicenseAware."); saveLicenseState(commandName, command, checkLicense.licenseLevel(licenseAware), observabilityTier); } else { log.info("Command " + commandName + " does not implement LicenseAware."); saveLicenseState(commandName, command, checkLicense.basicLicense, observabilityTier); } } private static void saveLicenseState( String name, LogicalPlan command, XPackLicenseState licenseState, SupportsObservabilityTier.ObservabilityTier observabilityTier ) throws Exception { DocsV3Support.CommandsDocsSupport docs = new DocsV3Support.CommandsDocsSupport( name.toLowerCase(Locale.ROOT), CommandLicenseTests.class, command, licenseState, 
observabilityTier, DocsV3Support.callbacksFromSystemProperty() ); docs.renderDocs(); } // Find all command classes, by looking at the public methods of the EsqlBaseParserVisitor private static Map<String, Class<? extends LogicalPlan>> getCommandClasses() { Map<String, Class<? extends LogicalPlan>> commandClasses = new TreeMap<>(); Pattern pattern = Pattern.compile("visit(\\w+)Command"); String planPackage = "org.elasticsearch.xpack.esql.plan.logical"; Map<String, String> commandClassNameMapper = Map.of( "Where", "Filter", "Inline", "InlineStats", "Rrf", "RrfScoreEval", "Sort", "OrderBy", "Stats", "Aggregate", "Join", "LookupJoin" ); Map<String, String> commandNameMapper = Map.of( "ChangePoint", "CHANGE_POINT", "LookupJoin", "LOOKUP_JOIN", "MvExpand", "MV_EXPAND", "InlineStats", "INLINE_STATS" ); Map<String, String> commandPackageMapper = Map.of("Rerank", planPackage + ".inference", "LookupJoin", planPackage + ".join"); Set<String> ignoredClasses = Set.of("Processing", "TimeSeries", "Completion", "Source", "From", "Row"); for (Method method : EsqlBaseParserVisitor.class.getMethods()) { String methodName = method.getName(); Matcher matcher = pattern.matcher(methodName); if (matcher.matches()) { String className = matcher.group(1); if (ignoredClasses.contains(className)) { continue; } String commandName = commandNameMapper.getOrDefault(className, className.toUpperCase(Locale.ROOT)); if (commandClassNameMapper.containsKey(className)) { className = commandClassNameMapper.get(className); if (commandNameMapper.containsKey(className)) { commandName = commandNameMapper.get(className); } } try { String fullClassName = commandPackageMapper.getOrDefault(className, planPackage) + "." 
+ className; Class<?> candidateClass = Class.forName(fullClassName); if (LogicalPlan.class.isAssignableFrom(candidateClass)) { commandClasses.put(commandName, candidateClass.asSubclass(LogicalPlan.class)); } else { log.info("Class " + className + " does NOT extend LogicalPlan."); } } catch (ClassNotFoundException e) { log.info("Class " + className + " not found."); } } } return commandClasses; } private static LogicalPlan createInstance(Class<? extends LogicalPlan> clazz, LogicalPlan child) throws InvocationTargetException, InstantiationException, IllegalAccessException { Source source = Source.EMPTY; // hard coded cases where the first two parameters are not Source and child LogicalPlan switch (clazz.getSimpleName()) { case "Grok" -> { return new Grok(source, child, null, null, List.of()); } case "Fork" -> { return new Fork(source, List.of(child, child), List.of()); } case "Sample" -> { return new Sample(source, null, child); } case "LookupJoin" -> { return new LookupJoin(source, child, child, List.of(), false, null); } case "Limit" -> { return new Limit(source, null, child); } } // For all others, find the constructor that takes Source and LogicalPlan as the first two parameters Constructor<?>[] constructors = clazz.getConstructors(); Constructor<?> constructor = Arrays.stream(constructors).filter(c -> { Class<?>[] params = c.getParameterTypes(); return params.length > 1 && Source.class.isAssignableFrom(params[0]) && LogicalPlan.class.isAssignableFrom(params[1]); }) .min(Comparator.comparingInt(c -> c.getParameterTypes().length)) .orElseThrow(() -> new IllegalArgumentException("No suitable constructor found for class " + clazz.getName())); @SuppressWarnings("unchecked") Object[] args = EsqlNodeSubclassTests.ctorArgs((Constructor<? extends Node<?>>) constructor); args[1] = child; log.info("Creating instance of " + clazz.getName() + " with constructor: " + constructor); return (LogicalPlan) constructor.newInstance(args); } }
TestCheckLicense
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/XmlWithArrayEmbeddableTest.java
{ "start": 13343, "end": 13958 }
class ____ { @Id private Long id; @JdbcTypeCode(SqlTypes.SQLXML) private EmbeddableWithArrayAggregate aggregate; //Getters and setters are omitted for brevity public XmlHolder() { } public XmlHolder(Long id, EmbeddableWithArrayAggregate aggregate) { this.id = id; this.aggregate = aggregate; } public Long getId() { return id; } public void setId(Long id) { this.id = id; } public EmbeddableWithArrayAggregate getAggregate() { return aggregate; } public void setAggregate(EmbeddableWithArrayAggregate aggregate) { this.aggregate = aggregate; } } }
XmlHolder
java
apache__flink
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/MainThreadExecutable.java
{ "start": 1139, "end": 1349 }
interface ____ intended to be implemented by the self gateway in a {@link RpcEndpoint} * implementation which allows to dispatch local procedures to the main thread of the underlying RPC * endpoint. */ public
is
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/component/bean/BeanOverloadedMethodFQNTest.java
{ "start": 5398, "end": 5602 }
class ____ { public String order(MyOrder order) { return "OK"; } public String order(MyOrder order, Boolean gold) { return "OK;GOLD"; } } }
MyBean
java
spring-projects__spring-boot
core/spring-boot-test/src/test/java/org/springframework/boot/test/context/AnnotationsPropertySourceTests.java
{ "start": 12367, "end": 12500 }
interface ____ { String value() default "bar"; } @Retention(RetentionPolicy.RUNTIME) @SelfAnnotating @
AliasedAttributeAnnotation
java
hibernate__hibernate-orm
hibernate-vector/src/main/java/org/hibernate/vector/internal/PGVectorJdbcLiteralFormatterVector.java
{ "start": 468, "end": 1325 }
class ____<T> extends BasicJdbcLiteralFormatter<T> { private final JdbcLiteralFormatter<Object> elementFormatter; public PGVectorJdbcLiteralFormatterVector(JavaType<T> javaType, JdbcLiteralFormatter<?> elementFormatter) { super( javaType ); //noinspection unchecked this.elementFormatter = (JdbcLiteralFormatter<Object>) elementFormatter; } @Override public void appendJdbcLiteral(SqlAppender appender, T value, Dialect dialect, WrapperOptions wrapperOptions) { final Object[] objects = unwrap( value, Object[].class, wrapperOptions ); appender.appendSql( "cast('" ); char separator = '['; for ( Object o : objects ) { appender.appendSql( separator ); elementFormatter.appendJdbcLiteral( appender, o, dialect, wrapperOptions ); separator = ','; } appender.appendSql( "]' as vector)" ); } }
PGVectorJdbcLiteralFormatterVector
java
spring-projects__spring-framework
spring-web/src/main/java/org/springframework/http/server/observation/ServerHttpObservationDocumentation.java
{ "start": 1079, "end": 1231 }
class ____ used by automated tools to document KeyValues attached to the * HTTP server observations. * * @author Brian Clozel * @since 6.0 */ public
is
java
apache__flink
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/TableEnvironment.java
{ "start": 28411, "end": 28623 }
class ____ and user defined resource uri. * * <p>Compared to {@link #createTemporaryFunction(String, Class)}, this method allows * registering a user defined function by only providing a full path
name
java
hibernate__hibernate-orm
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/sameids/SameIdTestEntity1.java
{ "start": 330, "end": 1371 }
class ____ { @Id private Integer id; @Audited private String str1; public SameIdTestEntity1() { } public SameIdTestEntity1(String str1) { this.str1 = str1; } public SameIdTestEntity1(Integer id, String str1) { this.id = id; this.str1 = str1; } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public String getStr1() { return str1; } public void setStr1(String str1) { this.str1 = str1; } public boolean equals(Object o) { if ( this == o ) { return true; } if ( !(o instanceof SameIdTestEntity1) ) { return false; } SameIdTestEntity1 that = (SameIdTestEntity1) o; if ( id != null ? !id.equals( that.id ) : that.id != null ) { return false; } if ( str1 != null ? !str1.equals( that.str1 ) : that.str1 != null ) { return false; } return true; } public int hashCode() { int result; result = (id != null ? id.hashCode() : 0); result = 31 * result + (str1 != null ? str1.hashCode() : 0); return result; } }
SameIdTestEntity1
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
{ "start": 3341, "end": 5922 }
class ____ the {@link DefaultLinuxContainerRuntime} specifically * for containers which run Java commands. It generates a new java security * policy file per container and modifies the java command to enable the * Java Security Manager with the generated policy.</p> * * The behavior of the {@link JavaSandboxLinuxContainerRuntime} can be modified * using the following settings: * * <ul> * <li> * {@value * org.apache.hadoop.yarn.conf.YarnConfiguration#YARN_CONTAINER_SANDBOX} : * This yarn-site.xml setting has three options: * <ul> * <li>disabled - Default behavior. {@link LinuxContainerRuntime} * is disabled</li> * <li>permissive - JVM containers will run with Java Security Manager * enabled. Non-JVM containers will run normally</li> * <li>enforcing - JVM containers will run with Java Security Manager * enabled. Non-JVM containers will be prevented from executing and an * {@link ContainerExecutionException} will be thrown.</li> * </ul> * </li> * <li> * {@value * org.apache.hadoop.yarn.conf.YarnConfiguration#YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS} * : * Determines the file permissions for the application directories. The * permissions come in the form of comma separated values * (e.g. read,write,execute,delete). Defaults to {@code read} for read-only. * </li> * <li> * {@value * org.apache.hadoop.yarn.conf.YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY} * : * Accepts canonical path to a java policy file on the local filesystem. * This file will be loaded as the base policy, any additional container * grants will be appended to this base file. If not specified, the default * java.policy file provided with hadoop resources will be used. * </li> * <li> * {@value * org.apache.hadoop.yarn.conf.YarnConfiguration#YARN_CONTAINER_SANDBOX_WHITELIST_GROUP} * : * Optional setting to specify a YARN queue which will be exempt from the * sand-boxing process. 
* </li> * <li> * {@value * org.apache.hadoop.yarn.conf.YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX}$groupName * : * Optional setting to map groups to java policy files. The value is a path * to the java policy file for $groupName. A user which is a member of * multiple groups with different policies will receive the superset of all * the permissions across their groups. * </li> * </ul> */ @InterfaceAudience.Private @InterfaceStability.Unstable public
extends
java
elastic__elasticsearch
distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/AddFileKeyStoreCommandTests.java
{ "start": 1112, "end": 9598 }
class ____ extends KeyStoreCommandTestCase { @Override protected Command newCommand() { return new AddFileKeyStoreCommand() { @Override protected Environment createEnv(OptionSet options, ProcessInfo processInfo) throws UserException { return env; } }; } private Path createRandomFile() throws IOException { int length = randomIntBetween(10, 20); byte[] bytes = new byte[length]; for (int i = 0; i < length; ++i) { bytes[i] = randomByte(); } Path file = env.configDir().resolve(randomAlphaOfLength(16)); Files.write(file, bytes); return file; } private void addFile(KeyStoreWrapper keystore, String setting, Path file, String password) throws Exception { keystore.setFile(setting, Files.readAllBytes(file)); keystore.save(env.configDir(), password.toCharArray()); } public void testMissingCreateWithEmptyPasswordWhenPrompted() throws Exception { assumeFalse("Cannot create unprotected keystore on FIPS JVM", inFipsJvm()); String password = ""; Path file1 = createRandomFile(); terminal.addTextInput("y"); execute("foo", file1.toString()); assertSecureFile("foo", file1, password); } public void testMissingCreateWithEmptyPasswordWithoutPromptIfForced() throws Exception { assumeFalse("Cannot create unprotected keystore on FIPS JVM", inFipsJvm()); String password = ""; Path file1 = createRandomFile(); execute("-f", "foo", file1.toString()); assertSecureFile("foo", file1, password); } public void testMissingNoCreate() throws Exception { terminal.addSecretInput(randomFrom("", "keystorepassword")); terminal.addTextInput("n"); // explicit no execute("foo"); assertNull(KeyStoreWrapper.load(env.configDir())); } public void testOverwritePromptDefault() throws Exception { String password = "keystorepassword"; Path file = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file, password); terminal.addSecretInput(password); terminal.addSecretInput(password); terminal.addTextInput(""); execute("foo", "path/dne"); assertSecureFile("foo", file, 
password); } public void testOverwritePromptExplicitNo() throws Exception { String password = "keystorepassword"; Path file = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file, password); terminal.addSecretInput(password); terminal.addTextInput("n"); // explicit no execute("foo", "path/dne"); assertSecureFile("foo", file, password); } public void testOverwritePromptExplicitYes() throws Exception { String password = "keystorepassword"; Path file1 = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file1, password); terminal.addSecretInput(password); terminal.addSecretInput(password); terminal.addTextInput("y"); Path file2 = createRandomFile(); execute("foo", file2.toString()); assertSecureFile("foo", file2, password); } public void testOverwriteForceShort() throws Exception { String password = "keystorepassword"; Path file1 = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file1, password); Path file2 = createRandomFile(); terminal.addSecretInput(password); terminal.addSecretInput(password); execute("-f", "foo", file2.toString()); assertSecureFile("foo", file2, password); } public void testOverwriteForceLong() throws Exception { String password = "keystorepassword"; Path file1 = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file1, password); Path file2 = createRandomFile(); terminal.addSecretInput(password); execute("--force", "foo", file2.toString()); assertSecureFile("foo", file2, password); } public void testForceDoesNotAlreadyExist() throws Exception { String password = "keystorepassword"; createKeystore(password); Path file = createRandomFile(); terminal.addSecretInput(password); execute("--force", "foo", file.toString()); assertSecureFile("foo", file, password); } public void testMissingSettingName() throws Exception { String password = "keystorepassword"; 
createKeystore(password); terminal.addSecretInput(password); UserException e = expectThrows(UserException.class, this::execute); assertEquals(ExitCodes.USAGE, e.exitCode); assertThat(e.getMessage(), containsString("Missing setting name")); } public void testMissingFileName() throws Exception { String password = "keystorepassword"; createKeystore(password); terminal.addSecretInput(password); UserException e = expectThrows(UserException.class, () -> execute("foo")); assertEquals(ExitCodes.USAGE, e.exitCode); assertThat(e.getMessage(), containsString("settings and filenames must come in pairs")); } public void testFileDNE() throws Exception { String password = "keystorepassword"; createKeystore(password); terminal.addSecretInput(password); UserException e = expectThrows(UserException.class, () -> execute("foo", "path/dne")); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertThat(e.getMessage(), containsString("File [path/dne] does not exist")); } public void testExtraArguments() throws Exception { String password = "keystorepassword"; createKeystore(password); Path file = createRandomFile(); terminal.addSecretInput(password); UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString(), "bar")); assertEquals(e.getMessage(), ExitCodes.USAGE, e.exitCode); assertThat(e.getMessage(), containsString("settings and filenames must come in pairs")); } public void testIncorrectPassword() throws Exception { String password = "keystorepassword"; createKeystore(password); Path file = createRandomFile(); terminal.addSecretInput("thewrongkeystorepassword"); UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString())); assertEquals(e.getMessage(), ExitCodes.DATA_ERROR, e.exitCode); if (inFipsJvm()) { assertThat( e.getMessage(), anyOf( containsString("Provided keystore password was incorrect"), containsString("Keystore has been corrupted or tampered with") ) ); } else { assertThat(e.getMessage(), containsString("Provided 
keystore password was incorrect")); } } public void testAddToUnprotectedKeystore() throws Exception { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); String password = ""; Path file = createRandomFile(); KeyStoreWrapper keystore = createKeystore(password); addFile(keystore, "foo", file, password); terminal.addTextInput(""); // will not be prompted for a password execute("foo", "path/dne"); assertSecureFile("foo", file, password); } public void testAddMultipleFiles() throws Exception { final String password = "keystorepassword"; createKeystore(password); final int n = randomIntBetween(1, 8); final List<Tuple<String, Path>> settingFilePairs = new ArrayList<>(n); for (int i = 0; i < n; i++) { settingFilePairs.add(Tuple.tuple("foo" + i, createRandomFile())); } terminal.addSecretInput(password); execute(settingFilePairs.stream().flatMap(t -> Stream.of(t.v1(), t.v2().toString())).toArray(String[]::new)); for (int i = 0; i < n; i++) { assertSecureFile(settingFilePairs.get(i).v1(), settingFilePairs.get(i).v2(), password); } } }
AddFileKeyStoreCommandTests
java
apache__flink
flink-core/src/main/java/org/apache/flink/api/common/serialization/RuntimeContextInitializationContextAdapters.java
{ "start": 2422, "end": 3320 }
class ____ implements DeserializationSchema.InitializationContext { private final RuntimeContext runtimeContext; private final Function<MetricGroup, MetricGroup> mapMetricGroup; private RuntimeContextDeserializationInitializationContextAdapter( RuntimeContext runtimeContext, Function<MetricGroup, MetricGroup> mapMetricGroup) { this.runtimeContext = runtimeContext; this.mapMetricGroup = mapMetricGroup; } @Override public MetricGroup getMetricGroup() { return mapMetricGroup.apply(runtimeContext.getMetricGroup()); } @Override public UserCodeClassLoader getUserCodeClassLoader() { return new RuntimeContextUserCodeClassLoaderAdapter(runtimeContext); } } private static final
RuntimeContextDeserializationInitializationContextAdapter
java
micronaut-projects__micronaut-core
http-client/src/main/java/io/micronaut/http/client/netty/DefaultHttpClient.java
{ "start": 9869, "end": 102945 }
class ____ implements WebSocketClient, HttpClient, StreamingHttpClient, SseClient, ProxyHttpClient, RawHttpClient, Closeable, AutoCloseable { /** * Default logger, use {@link #log} where possible. */ private static final Logger DEFAULT_LOG = LoggerFactory.getLogger(DefaultHttpClient.class); private static final int DEFAULT_HTTP_PORT = 80; private static final int DEFAULT_HTTPS_PORT = 443; /** * Which headers <i>not</i> to copy from the first request when redirecting to a second request. There doesn't * appear to be a spec for this. {@link HttpURLConnection} seems to drop all headers, but that would be a * breaking change. * <p> * Stored as a {@link HttpHeaders} with empty values because presumably someone thought about optimizing those * already. */ private static final HttpHeaders REDIRECT_HEADER_BLOCKLIST; static { REDIRECT_HEADER_BLOCKLIST = new DefaultHttpHeaders(); // The host should be recalculated based on the location REDIRECT_HEADER_BLOCKLIST.add(HttpHeaderNames.HOST, ""); // post body headers REDIRECT_HEADER_BLOCKLIST.add(HttpHeaderNames.CONTENT_TYPE, ""); REDIRECT_HEADER_BLOCKLIST.add(HttpHeaderNames.CONTENT_LENGTH, ""); REDIRECT_HEADER_BLOCKLIST.add(HttpHeaderNames.TRANSFER_ENCODING, ""); REDIRECT_HEADER_BLOCKLIST.add(HttpHeaderNames.CONNECTION, ""); } protected MediaTypeCodecRegistry mediaTypeCodecRegistry; protected final ByteBufferFactory<ByteBufAllocator, ByteBuf> byteBufferFactory = new NettyByteBufferFactory(); ConnectionManager connectionManager; private MessageBodyHandlerRegistry handlerRegistry; private final List<HttpFilterResolver.FilterEntry> clientFilterEntries; private final LoadBalancer loadBalancer; private final HttpClientConfiguration configuration; private final String contextPath; private final Charset defaultCharset; private final Logger log; private final HttpClientFilterResolver<ClientFilterResolutionContext> filterResolver; private final WebSocketBeanRegistry webSocketRegistry; private final RequestBinderRegistry 
requestBinderRegistry; private final String informationalServiceId; private final ConversionService conversionService; @Nullable private final ExecutorService blockingExecutor; /** * Construct a client for the given arguments. * * @param loadBalancer The {@link LoadBalancer} to use for selecting servers * @param configuration The {@link HttpClientConfiguration} object * @param contextPath The base URI to prepend to request uris * @param threadFactory The thread factory to use for client threads * @param nettyClientSslBuilder The SSL builder * @param codecRegistry The {@link MediaTypeCodecRegistry} to use for encoding and decoding objects * @param handlerRegistry The handler registry for encoding and decoding * @param annotationMetadataResolver The annotation metadata resolver * @param conversionService The conversion service * @param filters The filters to use * @deprecated Please go through the {@link #builder()} instead. If you need access to properties that are not public in the builder, make them public in core and document their usage. */ @Deprecated public DefaultHttpClient(@Nullable LoadBalancer loadBalancer, @NonNull HttpClientConfiguration configuration, @Nullable String contextPath, @Nullable ThreadFactory threadFactory, ClientSslBuilder nettyClientSslBuilder, @NonNull MediaTypeCodecRegistry codecRegistry, @NonNull MessageBodyHandlerRegistry handlerRegistry, @Nullable AnnotationMetadataResolver annotationMetadataResolver, ConversionService conversionService, HttpClientFilter... filters) { this( builder() .loadBalancer(loadBalancer) .configuration(configuration) .contextPath(contextPath) .threadFactory(threadFactory) .nettyClientSslBuilder(nettyClientSslBuilder) .codecRegistry(codecRegistry) .handlerRegistry(handlerRegistry) .conversionService(conversionService) .annotationMetadataResolver(annotationMetadataResolver) .filters(filters) ); } /** * Construct a client for the given arguments. 
* @param loadBalancer The {@link LoadBalancer} to use for selecting servers * @param explicitHttpVersion The HTTP version to use. Can be null and defaults to {@link io.micronaut.http.HttpVersion#HTTP_1_1} * @param configuration The {@link HttpClientConfiguration} object * @param contextPath The base URI to prepend to request uris * @param filterResolver The http client filter resolver * @param clientFilterEntries The client filter entries * @param threadFactory The thread factory to use for client threads * @param nettyClientSslBuilder The SSL builder * @param codecRegistry The {@link MediaTypeCodecRegistry} to use for encoding and decoding objects * @param handlerRegistry The handler registry for encoding and decoding * @param webSocketBeanRegistry The websocket bean registry * @param requestBinderRegistry The request binder registry * @param eventLoopGroup The event loop group to use * @param socketChannelFactory The socket channel factory * @param udpChannelFactory The UDP channel factory * @param clientCustomizer The pipeline customizer * @param informationalServiceId Optional service ID that will be passed to exceptions created by this client * @param conversionService The conversion service * @param resolverGroup Optional predefined resolver group * @deprecated Please go through the {@link #builder()} instead. If you need access to properties that are not public in the builder, make them public in core and document their usage. 
*/ @Deprecated public DefaultHttpClient(@Nullable LoadBalancer loadBalancer, @Nullable HttpVersionSelection explicitHttpVersion, @NonNull HttpClientConfiguration configuration, @Nullable String contextPath, @NonNull HttpClientFilterResolver<ClientFilterResolutionContext> filterResolver, @NonNull List<HttpFilterResolver.FilterEntry> clientFilterEntries, @Nullable ThreadFactory threadFactory, @NonNull ClientSslBuilder nettyClientSslBuilder, @NonNull MediaTypeCodecRegistry codecRegistry, @NonNull MessageBodyHandlerRegistry handlerRegistry, @NonNull WebSocketBeanRegistry webSocketBeanRegistry, @NonNull RequestBinderRegistry requestBinderRegistry, @Nullable EventLoopGroup eventLoopGroup, @NonNull ChannelFactory<? extends SocketChannel> socketChannelFactory, @NonNull ChannelFactory<? extends DatagramChannel> udpChannelFactory, NettyClientCustomizer clientCustomizer, @Nullable String informationalServiceId, ConversionService conversionService, @Nullable AddressResolverGroup<?> resolverGroup ) { this( builder() .loadBalancer(loadBalancer) .explicitHttpVersion(explicitHttpVersion) .configuration(configuration) .contextPath(contextPath) .filterResolver(filterResolver) .clientFilterEntries(clientFilterEntries) .threadFactory(threadFactory) .nettyClientSslBuilder(nettyClientSslBuilder) .codecRegistry(codecRegistry) .handlerRegistry(handlerRegistry) .webSocketBeanRegistry(webSocketBeanRegistry) .requestBinderRegistry(requestBinderRegistry) .eventLoopGroup(eventLoopGroup) .socketChannelFactory(socketChannelFactory) .udpChannelFactory(udpChannelFactory) .clientCustomizer(clientCustomizer) .informationalServiceId(informationalServiceId) .conversionService(conversionService) .resolverGroup(resolverGroup) ); } DefaultHttpClient(DefaultHttpClientBuilder builder) { this.loadBalancer = builder.loadBalancer; this.configuration = builder.configuration == null ? 
new DefaultHttpClientConfiguration() : builder.configuration; this.defaultCharset = configuration.getDefaultCharset(); if (StringUtils.isNotEmpty(builder.contextPath)) { if (builder.contextPath.charAt(0) != '/') { builder.contextPath = '/' + builder.contextPath; } this.contextPath = builder.contextPath; } else { this.contextPath = null; } this.mediaTypeCodecRegistry = builder.codecRegistry == null ? createDefaultMediaTypeRegistry() : builder.codecRegistry; this.handlerRegistry = builder.handlerRegistry == null ? createDefaultMessageBodyHandlerRegistry() : builder.handlerRegistry; this.log = configuration.getLoggerName().map(LoggerFactory::getLogger).orElse(DEFAULT_LOG); if (builder.filterResolver == null) { builder.filters(); } this.filterResolver = builder.filterResolver; if (builder.clientFilterEntries != null) { this.clientFilterEntries = builder.clientFilterEntries; } else { this.clientFilterEntries = builder.filterResolver.resolveFilterEntries( new ClientFilterResolutionContext(null, AnnotationMetadata.EMPTY_METADATA) ); } this.webSocketRegistry = builder.webSocketBeanRegistry; this.conversionService = builder.conversionService; this.requestBinderRegistry = builder.requestBinderRegistry == null ? new DefaultRequestBinderRegistry(conversionService) : builder.requestBinderRegistry; this.informationalServiceId = builder.informationalServiceId; this.blockingExecutor = builder.blockingExecutor; this.connectionManager = new ConnectionManager(log, configuration, builder); } /** * @param uri The URL * @deprecated Please go through the {@link #builder()} instead. */ @Deprecated public DefaultHttpClient(@Nullable URI uri) { this(builder().uri(uri)); } /** * @deprecated Please go through the {@link #builder()} instead. */ @Deprecated public DefaultHttpClient() { this(builder()); } /** * @param uri The URI * @param configuration The {@link HttpClientConfiguration} object * @deprecated Please go through the {@link #builder()} instead. 
*/ @Deprecated public DefaultHttpClient(@Nullable URI uri, @NonNull HttpClientConfiguration configuration) { this( builder() .uri(uri) .configuration(configuration) ); } /** * Constructor used by micronaut-oracle-cloud. * * @param uri The URI * @param configuration The {@link HttpClientConfiguration} object * @param clientSslBuilder The SSL builder * @deprecated Please go through the {@link #builder()} instead. */ @Deprecated public DefaultHttpClient(@Nullable URI uri, @NonNull HttpClientConfiguration configuration, @NonNull ClientSslBuilder clientSslBuilder) { this( builder() .uri(uri) .configuration(configuration) .nettyClientSslBuilder(clientSslBuilder) ); } /** * @param loadBalancer The {@link LoadBalancer} to use for selecting servers * @param configuration The {@link HttpClientConfiguration} object * @deprecated Please go through the {@link #builder()} instead. If you need access to properties that are not public in the builder, make them public in core and document their usage. */ @Deprecated public DefaultHttpClient(@Nullable LoadBalancer loadBalancer, HttpClientConfiguration configuration) { this( builder() .loadBalancer(loadBalancer) .configuration(configuration) ); } /** * Create a new builder for a {@link DefaultHttpClient}. * * @return The builder * @since 4.7.0 */ @NonNull public static DefaultHttpClientBuilder builder() { return new DefaultHttpClientBuilder(); } static boolean isAcceptEvents(io.micronaut.http.HttpRequest<?> request) { String acceptHeader = request.getHeaders().get(io.micronaut.http.HttpHeaders.ACCEPT); return acceptHeader != null && acceptHeader.equalsIgnoreCase(MediaType.TEXT_EVENT_STREAM); } /** * @return The configuration used by this client */ public HttpClientConfiguration getConfiguration() { return configuration; } /** * @return The client-specific logger name */ public Logger getLog() { return log; } /** * Access to the connection manager, for micronaut-oracle-cloud. 
* * @return The connection manager of this client */ public ConnectionManager connectionManager() { return connectionManager; } @Override public HttpClient start() { if (!isRunning()) { connectionManager.start(); } return this; } @Override public boolean isRunning() { return connectionManager.isRunning(); } @Override public HttpClient stop() { if (isRunning()) { connectionManager.shutdown(); } return this; } /** * @return The {@link MediaTypeCodecRegistry} used by this client * @deprecated Use body handlers instead */ @Deprecated public MediaTypeCodecRegistry getMediaTypeCodecRegistry() { return mediaTypeCodecRegistry; } /** * Sets the {@link MediaTypeCodecRegistry} used by this client. * * @param mediaTypeCodecRegistry The registry to use. Should not be null * @deprecated Use builder instead */ @Deprecated(forRemoval = true) public void setMediaTypeCodecRegistry(MediaTypeCodecRegistry mediaTypeCodecRegistry) { if (mediaTypeCodecRegistry != null) { this.mediaTypeCodecRegistry = mediaTypeCodecRegistry; } } /** * Get the handler registry for this client. * * @return The handler registry */ @NonNull public final MessageBodyHandlerRegistry getHandlerRegistry() { return handlerRegistry; } /** * Set the handler registry for this client. 
* * @param handlerRegistry The handler registry * @deprecated Use builder instead */ @Deprecated(forRemoval = true) public final void setHandlerRegistry(@NonNull MessageBodyHandlerRegistry handlerRegistry) { this.handlerRegistry = handlerRegistry; } @Override public BlockingHttpClient toBlocking() { return new BlockingHttpClient() { @Override public void close() { DefaultHttpClient.this.close(); } @Override public <I, O, E> HttpResponse<O> exchange(io.micronaut.http.HttpRequest<I> request, Argument<O> bodyType, Argument<E> errorType) { if (!configuration.isAllowBlockEventLoop() && Thread.currentThread() instanceof FastThreadLocalThread) { throw new HttpClientException(""" You are trying to run a BlockingHttpClient operation on a netty event \ loop thread. This is a common cause for bugs: Event loops should \ never be blocked. You can either mark your controller as \ @ExecuteOn(TaskExecutors.BLOCKING), or use the reactive HTTP client \ to resolve this bug. There is also a configuration option to \ disable this check if you are certain a blocking operation is fine \ here."""); } BlockHint blockHint = BlockHint.willBlockThisThread(); return DefaultHttpClient.this.exchange(request, bodyType, errorType, blockHint).block(); // We don't have to release client response buffer } @Override public <I, O, E> O retrieve(io.micronaut.http.HttpRequest<I> request, Argument<O> bodyType, Argument<E> errorType) { // mostly copied from super method, but with customizeException HttpResponse<O> response = exchange(request, bodyType, errorType); if (HttpStatus.class.isAssignableFrom(bodyType.getType())) { return (O) response.getStatus(); } else { Optional<O> body = response.getBody(); if (body.isEmpty() && response.getBody(Argument.of(byte[].class)).isPresent()) { throw decorate(new HttpClientResponseException( "Failed to decode the body for the given content type [%s]".formatted(response.getContentType().orElse(null)), response )); } else { return body.orElseThrow(() -> decorate(new 
HttpClientResponseException( "Empty body", response ))); } } } @Override public boolean isRunning() { return DefaultHttpClient.this.isRunning(); } @Override public BlockingHttpClient start() { return DefaultHttpClient.this.start().toBlocking(); } @Override public BlockingHttpClient stop() { return DefaultHttpClient.this.stop().toBlocking(); } }; } @NonNull private <I> MutableHttpRequest<?> toMutableRequest(io.micronaut.http.HttpRequest<I> request) { return MutableHttpRequestWrapper.wrapIfNecessary(conversionService, request); } @SuppressWarnings("SubscriberImplementation") @Override public <I> Publisher<Event<ByteBuffer<?>>> eventStream(io.micronaut.http.@NonNull HttpRequest<I> request) { setupConversionService(request); return eventStreamOrError(request, null); } private <I> Publisher<Event<ByteBuffer<?>>> eventStreamOrError(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<?> errorType) { if (request instanceof MutableHttpRequest<?> httpRequest) { httpRequest.accept(MediaType.TEXT_EVENT_STREAM_TYPE); } return Flux.create(emitter -> dataStream(request, errorType).subscribe(new Subscriber<>() { private Subscription dataSubscription; private CurrentEvent currentEvent; @Override public void onSubscribe(Subscription s) { this.dataSubscription = s; Disposable cancellable = () -> dataSubscription.cancel(); emitter.onCancel(cancellable); if (!emitter.isCancelled() && emitter.requestedFromDownstream() > 0) { // request the first chunk dataSubscription.request(1); } } @Override public void onNext(ByteBuffer<?> buffer) { try { int len = buffer.readableBytes(); // a length of zero indicates the start of a new event // emit the current event if (len == 0) { try { Event event = Event.of(byteBufferFactory.wrap(currentEvent.data)) .name(currentEvent.name) .retry(currentEvent.retry) .id(currentEvent.id); emitter.next( event ); } finally { currentEvent = null; } } else { if (currentEvent == null) { currentEvent = new CurrentEvent(); } int colonIndex = 
buffer.indexOf((byte) ':'); // SSE comments start with colon, so skip if (colonIndex > 0) { // obtain the type String type = buffer.slice(0, colonIndex).toString(StandardCharsets.UTF_8).trim(); int fromIndex = colonIndex + 1; // skip the white space before the actual data if (buffer.getByte(fromIndex) == ((byte) ' ')) { fromIndex++; } if (fromIndex < len) { int toIndex = len - fromIndex; switch (type) { case "data" -> { ByteBuffer<?> content = buffer.slice(fromIndex, toIndex); byte[] d = currentEvent.data; if (d == null) { currentEvent.data = content.toByteArray(); } else { currentEvent.data = ArrayUtils.concat(d, content.toByteArray()); } } case "id" -> { ByteBuffer<?> id = buffer.slice(fromIndex, toIndex); currentEvent.id = id.toString(StandardCharsets.UTF_8).trim(); } case "event" -> { ByteBuffer<?> event = buffer.slice(fromIndex, toIndex); currentEvent.name = event.toString(StandardCharsets.UTF_8).trim(); } case "retry" -> { ByteBuffer<?> retry = buffer.slice(fromIndex, toIndex); String text = retry.toString(StandardCharsets.UTF_8); if (!StringUtils.isEmpty(text)) { currentEvent.retry = Duration.ofMillis(Long.parseLong(text)); } } default -> { // ignore message } } } } } if (emitter.requestedFromDownstream() > 0 && !emitter.isCancelled()) { dataSubscription.request(1); } } catch (Throwable e) { onError(e); } finally { if (buffer instanceof ReferenceCounted counted) { counted.release(); } } } @Override public void onError(Throwable t) { dataSubscription.cancel(); if (t instanceof HttpClientException) { emitter.error(t); } else { emitter.error(decorate(new HttpClientException("Error consuming Server Sent Events: " + t.getMessage(), t))); } } @Override public void onComplete() { emitter.complete(); } }), FluxSink.OverflowStrategy.BUFFER); } private static <T> Mono<T> toMono(ExecutionFlow<T> flow, PropagatedContext context) { return Mono.from(ReactivePropagation.propagate(context, ReactiveExecutionFlow.toPublisher(flow))); } @Override public <I, B> 
Publisher<Event<B>> eventStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<B> eventType) { setupConversionService(request); return eventStream(request, eventType, DEFAULT_ERROR_TYPE); } @Override public <I, B> Publisher<Event<B>> eventStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<B> eventType, @NonNull Argument<?> errorType) { setupConversionService(request); MessageBodyReader<B> reader = handlerRegistry.getReader(eventType, List.of(MediaType.APPLICATION_JSON_TYPE)); return Flux.from(eventStreamOrError(request, errorType)).map(byteBufferEvent -> { ByteBuffer<?> data = byteBufferEvent.getData(); B decoded = reader.read(eventType, MediaType.APPLICATION_JSON_TYPE, request.getHeaders(), data); return Event.of(byteBufferEvent, decoded); }); } @Override public <I> Publisher<ByteBuffer<?>> dataStream(io.micronaut.http.@NonNull HttpRequest<I> request) { setupConversionService(request); return dataStream(request, DEFAULT_ERROR_TYPE); } @Override public <I> Publisher<ByteBuffer<?>> dataStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<?> errorType) { setupConversionService(request); PropagatedContext propagatedContext = PropagatedContext.getOrEmpty(); return new MicronautFlux<>(toMono(resolveRequestURI(request), propagatedContext) .flatMapMany(requestURI -> dataStreamImpl(toMutableRequest(request), errorType, propagatedContext, requestURI)) .map(bb -> { if (bb.asNativeBuffer() instanceof ByteBuf byteBuf && byteBuf.refCnt() > 1) { // if we aren't the exclusive owner of this buffer, we need to detect whether // the downstream consumer releases it or not. For that, we need our own // refCnt. A composite buffer provides that. 
CompositeByteBuf composite = byteBuf.alloc().compositeBuffer(1); composite.addComponent(true, byteBuf); return byteBufferFactory.wrap(composite); } else { return bb; } })) .doAfterNext(buffer -> { Object o = buffer.asNativeBuffer(); if (o instanceof ByteBuf byteBuf) { if (byteBuf.refCnt() > 0) { ReferenceCountUtil.safeRelease(byteBuf); } } }); } @Override public <I> Publisher<HttpResponse<ByteBuffer<?>>> exchangeStream(io.micronaut.http.@NonNull HttpRequest<I> request) { return exchangeStream(request, DEFAULT_ERROR_TYPE); } @Override public <I> Publisher<HttpResponse<ByteBuffer<?>>> exchangeStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<?> errorType) { setupConversionService(request); PropagatedContext propagatedContext = PropagatedContext.getOrEmpty(); return new MicronautFlux<>(toMono(resolveRequestURI(request), propagatedContext) .flatMapMany(uri -> exchangeStreamImpl(propagatedContext, toMutableRequest(request), errorType, uri))) .doAfterNext(byteBufferHttpResponse -> { ByteBuffer<?> buffer = byteBufferHttpResponse.body(); if (buffer instanceof ReferenceCounted counted) { counted.release(); } }); } @Override public <I, O> Publisher<O> jsonStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<O> type) { return jsonStream(request, type, DEFAULT_ERROR_TYPE); } @Override public <I, O> Publisher<O> jsonStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<O> type, @NonNull Argument<?> errorType) { setupConversionService(request); PropagatedContext propagatedContext = PropagatedContext.getOrEmpty(); return Flux.from(toMono(resolveRequestURI(request), propagatedContext) .flatMapMany(requestURI -> jsonStreamImpl(propagatedContext, toMutableRequest(request), type, errorType, requestURI))); } @SuppressWarnings("unchecked") @Override public <I> Publisher<Map<String, Object>> jsonStream(io.micronaut.http.@NonNull HttpRequest<I> request) { return (Publisher) jsonStream(request, Map.class); } @Override 
    // Class-based convenience overload; wraps the Class into an Argument.
    public <I, O> Publisher<O> jsonStream(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Class<O> type) {
        setupConversionService(request);
        return jsonStream(request, Argument.of(type));
    }

    // Public exchange entry point: performs a full (buffered) request/response round trip.
    @Override
    public <I, O, E> Publisher<HttpResponse<O>> exchange(io.micronaut.http.@NonNull HttpRequest<I> request, @NonNull Argument<O> bodyType, @NonNull Argument<E> errorType) {
        return exchange(request, bodyType, errorType, null)
            // some tests expect flux...
            .flux();
    }

    // Core buffered-exchange implementation. Buffers the whole response body, then
    // converts it via handleExchangeResponse. Applies the configured request timeout
    // (falling back to readTimeout + 1s for compatibility).
    @NonNull
    private <I, O, E> Mono<HttpResponse<O>> exchange(io.micronaut.http.HttpRequest<I> request, Argument<O> bodyType, Argument<E> errorType, @Nullable BlockHint blockHint) {
        setupConversionService(request);
        PropagatedContext propagatedContext = PropagatedContext.getOrEmpty();
        // if a connection is available immediately, we can use its executor for the timeout
        // instead of a random executor for the whole group
        AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(connectionManager.getGroup());
        ExecutionFlow<HttpResponse<O>> mono = resolveRequestURI(request).flatMap(uri -> {
            MutableHttpRequest<?> mutableRequest = toMutableRequest(request).uri(uri);
            //noinspection unchecked
            return sendRequestWithRedirects(
                propagatedContext,
                scheduler,
                blockHint,
                mutableRequest,
                (req, resp) -> InternalByteBody.bufferFlow(resp.byteBody())
                    .onErrorResume(t -> ExecutionFlow.error(handleResponseError(mutableRequest, t)))
                    .flatMap(av -> handleExchangeResponse(bodyType, errorType, resp, av))
            ).map(r -> (HttpResponse<O>) r);
        });
        Duration requestTimeout = configuration.getRequestTimeout();
        if (requestTimeout == null) {
            // for compatibility
            requestTimeout = configuration.getReadTimeout()
                .filter(d -> !d.isNegative())
                .map(d -> d.plusSeconds(1)).orElse(null);
        }
        if (requestTimeout != null) {
            if (!requestTimeout.isNegative()) {
                // map TimeoutException to the shared ReadTimeoutException singleton
                mono = mono.timeout(requestTimeout, scheduler.get(), null)
                    .onErrorResume(throwable -> {
                        if (throwable instanceof TimeoutException) {
                            return ExecutionFlow.error(ReadTimeoutException.TIMEOUT_EXCEPTION);
                        }
                        return ExecutionFlow.error(throwable);
                    });
            }
        }
        return toMono(mono, propagatedContext);
    }

    // Converts a fully buffered response body into a FullNettyClientHttpResponse, or
    // into an error flow for non-convertible (error-status) responses. The netty
    // response is always released in the finally block.
    private <O, E> @NonNull ExecutionFlow<FullNettyClientHttpResponse<O>> handleExchangeResponse(Argument<O> bodyType, Argument<E> errorType, NettyClientByteBodyResponse resp, CloseableAvailableByteBody av) {
        ByteBuf buf = NettyByteBodyFactory.toByteBuf(av);
        DefaultFullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(
            resp.nettyResponse.protocolVersion(),
            resp.nettyResponse.status(),
            buf,
            resp.nettyResponse.headers(),
            EmptyHttpHeaders.INSTANCE
        );
        try {
            if (log.isTraceEnabled()) {
                traceBody("Response", fullHttpResponse.content());
            }
            boolean convertBodyWithBodyType = shouldConvertWithBodyType(fullHttpResponse, this.configuration, bodyType, errorType);
            FullNettyClientHttpResponse<O> response
                = new FullNettyClientHttpResponse<>(fullHttpResponse, handlerRegistry, bodyType, convertBodyWithBodyType, conversionService);
            if (convertBodyWithBodyType) {
                return ExecutionFlow.just(response);
            } else { // error flow
                try {
                    return ExecutionFlow.error(makeErrorFromRequestBody(errorType, fullHttpResponse.status(), response));
                } catch (HttpClientResponseException t) {
                    return ExecutionFlow.error(t);
                } catch (Exception t) {
                    return ExecutionFlow.error(makeErrorBodyParseError(fullHttpResponse, t));
                }
            }
        } catch (HttpClientResponseException t) {
            return ExecutionFlow.error(t);
        } catch (Exception t) {
            // decoding the success body failed: surface it as a response exception
            // carrying an unconverted view of the response
            FullNettyClientHttpResponse<Object> response
                = new FullNettyClientHttpResponse<>(
                fullHttpResponse,
                handlerRegistry,
                null,
                false,
                conversionService
            );
            HttpClientResponseException clientResponseError = decorate(new HttpClientResponseException(
                "Error decoding HTTP response body: " + t.getMessage(),
                t,
                response,
                new HttpClientErrorDecoder() {
                    @Override
                    public Argument<?> getErrorType(MediaType mediaType) {
                        return errorType;
                    }
                }
            ));
            return ExecutionFlow.error(clientResponseError);
        } finally {
            fullHttpResponse.release();
        }
    }

    @Override
    public
    <I, O, E> Publisher<O> retrieve(io.micronaut.http.HttpRequest<I> request, Argument<O> bodyType, Argument<E> errorType) {
        setupConversionService(request);
        // mostly same as default impl, but with exception customization
        Flux<HttpResponse<O>> exchange = Flux.from(exchange(request, bodyType, errorType));
        if (bodyType.getType() == void.class) {
            // exchange() returns a HttpResponse<Void>, we can't map the Void body properly, so just drop it and complete
            return (Publisher<O>) exchange.ignoreElements();
        }
        return exchange.map(response -> {
            if (bodyType.getType() == HttpStatus.class) {
                return (O) response.getStatus();
            } else {
                Optional<O> body = response.getBody();
                // raw bytes present but conversion failed -> decoding problem, not an empty body
                if (body.isEmpty() && response.getBody(byte[].class).isPresent()) {
                    throw decorate(new HttpClientResponseException(
                        "Failed to decode the body for the given content type [%s]".formatted(response.getContentType().orElse(null)),
                        response
                    ));
                } else {
                    return body.orElseThrow(() -> decorate(new HttpClientResponseException(
                        "Empty body",
                        response
                    )));
                }
            }
        });
    }

    // Opens a websocket connection for the given endpoint class and explicit request.
    @Override
    public <T extends AutoCloseable> Publisher<T> connect(Class<T> clientEndpointType, MutableHttpRequest<?> request) {
        setupConversionService(request);
        return toMono(resolveRequestURI(request), PropagatedContext.getOrEmpty()).flux()
            .switchMap(resolvedURI -> connectWebSocket(resolvedURI, request, clientEndpointType, null));
    }

    // Opens a websocket connection, deriving the URI from the @ClientWebSocket
    // annotation's template expanded with the given parameters.
    @Override
    public <T extends AutoCloseable> Publisher<T> connect(Class<T> clientEndpointType, Map<String, Object> parameters) {
        WebSocketBean<T> webSocketBean = webSocketRegistry.getWebSocket(clientEndpointType);
        String uri = webSocketBean.getBeanDefinition().stringValue(ClientWebSocket.class).orElse("/ws");
        uri = UriTemplate.of(uri).expand(parameters);
        MutableHttpRequest<Object> request = io.micronaut.http.HttpRequest.GET(uri);
        return toMono(resolveRequestURI(request), PropagatedContext.getOrEmpty()).flux()
            .switchMap(resolvedURI -> connectWebSocket(resolvedURI, request, clientEndpointType, webSocketBean));
    }

    @Override
    public void close() {
        stop();
    }

    // Performs the websocket handshake: resolves the ws/wss URL, builds the netty
    // handshaker (honoring @ClientWebSocket version/subprotocol and @OnMessage
    // maxPayloadLength), and completes once the handshake finishes.
    private <T> Publisher<T> connectWebSocket(URI uri, MutableHttpRequest<?> request, Class<T> clientEndpointType, WebSocketBean<T> webSocketBean) {
        RequestKey requestKey;
        try {
            requestKey = new RequestKey(this, uri);
        } catch (HttpClientException e) {
            return Flux.error(e);
        }

        if (webSocketBean == null) {
            webSocketBean = webSocketRegistry.getWebSocket(clientEndpointType);
        }

        WebSocketVersion protocolVersion = webSocketBean.getBeanDefinition().enumValue(ClientWebSocket.class, "version", WebSocketVersion.class).orElse(WebSocketVersion.V13);
        // 65536 is the default max frame payload when @OnMessage doesn't specify one
        int maxFramePayloadLength = webSocketBean.messageMethod()
            .map(m -> m.intValue(OnMessage.class, "maxPayloadLength")
                .orElse(65536)).orElse(65536);
        String subprotocol = webSocketBean.getBeanDefinition().stringValue(ClientWebSocket.class, "subprotocol").orElse(StringUtils.EMPTY_STRING);
        URI webSocketURL = UriBuilder.of(uri)
            .scheme(!requestKey.isSecure() ? "ws" : "wss")
            .host(requestKey.getHost())
            .port(requestKey.getPort())
            .build();

        MutableHttpHeaders headers = request.getHeaders();
        HttpHeaders customHeaders = EmptyHttpHeaders.INSTANCE;
        if (headers instanceof NettyHttpHeaders httpHeaders) {
            customHeaders = httpHeaders.getNettyHeaders();
        }
        if (StringUtils.isNotEmpty(subprotocol)) {
            NettyHttpHeaders.validateHeader("Sec-WebSocket-Protocol", subprotocol);
            customHeaders.add("Sec-WebSocket-Protocol", subprotocol);
        }

        NettyWebSocketClientHandler<T> handler = new NettyWebSocketClientHandler<>(
            request,
            webSocketBean,
            WebSocketClientHandshakerFactory.newHandshaker(
                webSocketURL, protocolVersion, subprotocol, true, customHeaders, maxFramePayloadLength),
            requestBinderRegistry,
            mediaTypeCodecRegistry,
            handlerRegistry,
            conversionService);

        if (!isRunning()) {
            return Mono.error(decorate(new HttpClientException("The client is closed, unable to connect for websocket.")));
        }

        return connectionManager.connectForWebsocket(requestKey, handler)
            .then(handler.getHandshakeCompletedMono());
    }

    private <I> Flux<HttpResponse<ByteBuffer<?>>>
    exchangeStreamImpl(PropagatedContext propagatedContext, MutableHttpRequest<I> request, Argument<?> errorType, URI requestURI) {
        // Wraps each HTTP content chunk of the streamed response into an
        // HttpResponse<ByteBuffer<?>> so callers see headers + chunk together.
        Flux<HttpResponse<?>> streamResponsePublisher = toMono(buildStreamExchange(propagatedContext, request, requestURI, errorType), propagatedContext).flux();
        return streamResponsePublisher.switchMap(response -> {
            StreamedHttpResponse streamedHttpResponse = NettyHttpResponseBuilder.toStreamResponse(response);
            Flux<HttpContent> httpContentReactiveSequence = Flux.from(streamedHttpResponse);
            return httpContentReactiveSequence
                .filter(message -> !(message.content() instanceof EmptyByteBuf))
                .map(message -> {
                    ByteBuf byteBuf = message.content();
                    if (log.isTraceEnabled()) {
                        log.trace("HTTP Client Streaming Response Received Chunk (length: {}) for Request: {} {}",
                            byteBuf.readableBytes(), request.getMethodName(), request.getUri());
                        traceBody("Response", byteBuf);
                    }
                    ByteBuffer<?> byteBuffer = byteBufferFactory.wrap(byteBuf);
                    NettyStreamedHttpResponse<ByteBuffer<?>> thisResponse = new NettyStreamedHttpResponse<>(streamedHttpResponse, conversionService);
                    thisResponse.setBody(byteBuffer);
                    return (HttpResponse<ByteBuffer<?>>) new HttpResponseWrapper<>(thisResponse);
                });
        });
    }

    // Streams the response body through a chunked message-body reader that decodes
    // each chunk to the target type (JSON stream, or a plain JSON array).
    private <I, O> Flux<O> jsonStreamImpl(PropagatedContext propagatedContext, MutableHttpRequest<I> request, Argument<O> type, Argument<?> errorType, URI requestURI) {
        return toMono(buildStreamExchange(propagatedContext, request, requestURI, errorType), propagatedContext).flux().switchMap(response -> {
            if (!(response instanceof NettyStreamedHttpResponse)) {
                throw new IllegalStateException("Response has been wrapped in non streaming type. Do not wrap the response in client filters for stream requests");
            }
            StreamedHttpResponse streamResponse = NettyHttpResponseBuilder.toStreamResponse(response);

            // could also be application/json, in which case we will stream an array
            MediaType mediaType = response.getContentType().orElse(MediaType.APPLICATION_JSON_STREAM_TYPE);
            ChunkedMessageBodyReader<O> reader = (ChunkedMessageBodyReader<O>) handlerRegistry.getReader(type, List.of(mediaType));
            return reader.readChunked(type, mediaType, response.getHeaders(), Flux.from(streamResponse).map(c -> NettyByteBufferFactory.DEFAULT.wrap(c.content())));
        });
    }

    // Streams the raw response body as byte buffers, one element per non-empty chunk.
    private <I> Flux<ByteBuffer<?>> dataStreamImpl(MutableHttpRequest<I> request, Argument<?> errorType, PropagatedContext propagatedContext, URI requestURI) {
        Flux<HttpResponse<?>> streamResponsePublisher =
            toMono(buildStreamExchange(propagatedContext, request, requestURI, errorType), propagatedContext).flux();
        Function<HttpContent, ByteBuffer<?>> contentMapper = message -> {
            ByteBuf byteBuf = message.content();
            return byteBufferFactory.wrap(byteBuf);
        };
        return streamResponsePublisher.switchMap(response -> {
            if (!(response instanceof NettyStreamedHttpResponse)) {
                throw new IllegalStateException("Response has been wrapped in non streaming type. Do not wrap the response in client filters for stream requests");
            }
            NettyStreamedHttpResponse nettyStreamedHttpResponse = (NettyStreamedHttpResponse) response;
            Flux<HttpContent> httpContentReactiveSequence = Flux.from(nettyStreamedHttpResponse.getNettyResponse());
            return httpContentReactiveSequence
                .filter(message -> !(message.content() instanceof EmptyByteBuf))
                .map(contentMapper);
        });
    }

    /**
     * Implementation of {@link #jsonStream}, {@link #dataStream}, {@link #exchangeStream}.
     */
    @SuppressWarnings("MagicNumber")
    private <I> ExecutionFlow<HttpResponse<?>> buildStreamExchange(
        @Nullable PropagatedContext propagatedContext,
        @NonNull MutableHttpRequest<I> request,
        @NonNull URI requestURI,
        @Nullable Argument<?> errorType) {

        return this.sendRequestWithRedirects(
            propagatedContext,
            null,
            request.uri(requestURI),
            (req, resp) -> {
                ByteBody bb = resp.byteBody();
                Publisher<HttpContent> body;
                if (!hasBody(resp)) {
                    // bodiless response: close it eagerly and emit nothing
                    resp.close();
                    body = Flux.empty();
                } else {
                    if (isAcceptEvents(req)) {
                        // SSE: split the body on event boundaries
                        if (bb instanceof AvailableByteBody anbb) {
                            // same semantics as the streaming branch, but this is eager so it's more
                            // lax wrt unclosed responses.
                            ByteBuf single = NettyByteBodyFactory.toByteBuf(anbb);
                            List<ByteBuf> parts = SseSplitter.split(single);
                            // the trailing part after the last delimiter is dropped (released)
                            parts.get(parts.size() - 1).release();
                            body = Flux.fromIterable(parts.subList(0, parts.size() - 1)).map(DefaultHttpContent::new);
                        } else {
                            body = SseSplitter.split(NettyByteBodyFactory.toByteBufs(bb), sizeLimits()).map(DefaultHttpContent::new);
                        }
                    } else {
                        body = NettyByteBodyFactory.toByteBufs(bb).map(DefaultHttpContent::new);
                    }
                }
                return readBodyOnError(errorType, ExecutionFlow.<HttpResponse<?>>just(toStreamingResponse(resp, body))
                    .flatMap(r -> handleStreamHttpError(r, true)));
            }
        );
    }

    // Wraps a raw byte-body response plus a content publisher into the streamed
    // response type expected by the streaming APIs.
    private <B> MutableHttpResponse<B> toStreamingResponse(NettyClientByteBodyResponse resp, Publisher<HttpContent> content) {
        DefaultStreamedHttpResponse nettyResponse = new DefaultStreamedHttpResponse(
            resp.nettyResponse.protocolVersion(),
            resp.nettyResponse.status(),
            resp.getHeaders().getNettyHeaders(),
            content
        );
        return new NettyStreamedHttpResponse<>(nettyResponse, conversionService);
    }

    @Override
    public Publisher<MutableHttpResponse<?>> proxy(io.micronaut.http.@NonNull HttpRequest<?> request) {
        return proxy(request, ProxyRequestOptions.getDefault());
    }

    // Proxies the request as-is, streaming the response body through; error statuses
    // are passed along instead of raised (handleStreamHttpError with failOnError=false).
    @Override
    public Publisher<MutableHttpResponse<?>> proxy(io.micronaut.http.@NonNull HttpRequest<?> request, @NonNull ProxyRequestOptions options) {
        Objects.requireNonNull(options, "options");
        setupConversionService(request);
        PropagatedContext propagatedContext = PropagatedContext.getOrEmpty();
        return toMono(resolveRequestURI(request)
            .flatMap(requestURI -> {
                MutableHttpRequest<?> httpRequest = toMutableRequest(request);
                if (!options.isRetainHostHeader()) {
                    // drop the inbound Host header so the upstream host is used instead
                    httpRequest.headers(headers -> headers.remove(HttpHeaderNames.HOST));
                }
                return this.sendRequestWithRedirects(
                    propagatedContext,
                    null,
                    httpRequest.uri(requestURI),
                    (req, resp) -> {
                        Publisher<HttpContent> body;
                        if (!hasBody(resp)) {
                            resp.close();
                            body = Flux.empty();
                        } else {
                            body = NettyByteBodyFactory.toByteBufs(resp.byteBody()).map(DefaultHttpContent::new);
                        }
                        return ExecutionFlow.<HttpResponse<?>>just(toStreamingResponse(resp, body))
                            .flatMap(r -> handleStreamHttpError(r, false));
                    }
                );
            })
            .map(HttpResponse::toMutableResponse), propagatedContext);
    }

    // Propagates this client's ConversionService into conversion-aware requests.
    private void setupConversionService(io.micronaut.http.HttpRequest<?> httpRequest) {
        if (httpRequest instanceof ConversionServiceAware aware) {
            aware.setConversionService(conversionService);
        }
    }

    /**
     * @param request The request
     * @param <I>     The input type
     * @return A {@link Publisher} with the resolved URI
     */
    protected <I> ExecutionFlow<URI> resolveRequestURI(io.micronaut.http.HttpRequest<I> request) {
        return resolveRequestURI(request, true);
    }

    /**
     * @param request            The request
     * @param includeContextPath Whether to prepend the client context path
     * @param <I>                The input type
     * @return A {@link Publisher} with the resolved URI
     */
    protected <I> ExecutionFlow<URI> resolveRequestURI(io.micronaut.http.HttpRequest<I> request, boolean includeContextPath) {
        URI requestURI = request.getUri();
        if (requestURI.getScheme() != null) {
            // if the request URI includes a scheme then it is fully qualified so use the direct server
            return ExecutionFlow.just(requestURI);
        } else {
            return resolveURI(request, includeContextPath);
        }
    }

    /**
     * @param parentRequest The parent request
     * @param request       The redirect location request
     * @param <I>           The input type
     * @return A
     {@link Publisher} with the resolved URI
     */
    protected <I> ExecutionFlow<URI> resolveRedirectURI(io.micronaut.http.HttpRequest<?> parentRequest, io.micronaut.http.HttpRequest<I> request) {
        URI requestURI = request.getUri();
        if (requestURI.getScheme() != null) {
            // if the request URI includes a scheme then it is fully qualified so use the direct server
            return ExecutionFlow.just(requestURI);
        } else {
            if (parentRequest == null || parentRequest.getUri().getHost() == null) {
                return resolveURI(request, false);
            } else {
                // relative redirect: inherit scheme/userinfo/host/port from the parent request
                URI parentURI = parentRequest.getUri();
                UriBuilder uriBuilder = UriBuilder.of(requestURI)
                    .scheme(parentURI.getScheme())
                    .userInfo(parentURI.getUserInfo())
                    .host(parentURI.getHost())
                    .port(parentURI.getPort());
                return ExecutionFlow.just(uriBuilder.build());
            }
        }
    }

    /**
     * @return The discriminator to use when selecting a server for the purposes of load balancing (defaults to null)
     */
    protected Object getLoadBalancerDiscriminator() {
        return null;
    }

    /**
     * Builds the request body as a byte body, setting Host and Content-Type headers as
     * needed. Handles direct byte bodies, form-urlencoded, multipart, reactive bodies
     * and plain values serialized through the message-body writer registry.
     *
     * @param request            The request
     * @param requestURI         The URI of the request
     * @param requestContentType The request content type
     * @param permitsBody        Whether permits body
     * @param channel            The channel the body will be written to
     * @return The body
     * @throws HttpPostRequestEncoder.ErrorDataEncoderException if there is an encoder exception
     */
    private CloseableByteBody buildNettyRequest(
        MutableHttpRequest<?> request,
        URI requestURI,
        MediaType requestContentType,
        boolean permitsBody,
        Channel channel) throws HttpPostRequestEncoder.ErrorDataEncoderException {

        NettyByteBodyFactory byteBodyFactory = new NettyByteBodyFactory(channel);

        if (!request.getHeaders().contains(HttpHeaderNames.HOST)) {
            request.getHeaders().set(HttpHeaderNames.HOST, getHostHeader(requestURI));
        }

        if (permitsBody) {
            Optional<?> body = request.getBody();
            if (body.isPresent()) {
                // default Content-Type to application/json when a body is present
                if (!request.getHeaders().contains(HttpHeaderNames.CONTENT_TYPE)) {
                    MediaType mediaType = request.getContentType().orElse(MediaType.APPLICATION_JSON_TYPE);
                    request.getHeaders().set(HttpHeaderNames.CONTENT_TYPE, mediaType);
                }
            }
        }

        NettyHttpRequestBuilder nettyRequestBuilder = NettyHttpRequestBuilder.asBuilder(request);
        ByteBody direct = nettyRequestBuilder.byteBodyDirect();
        if (direct != null) {
            // the request already carries raw bytes; hand over ownership
            return direct.move();
        }

        if (permitsBody) {
            Optional<?> body = request.getBody();
            boolean hasBody = body.isPresent();
            if (requestContentType.equals(MediaType.APPLICATION_FORM_URLENCODED_TYPE) && hasBody) {
                Object bodyValue = body.get();
                if (bodyValue instanceof CharSequence sequence) {
                    // pre-encoded form string: send as-is
                    ReadBuffer byteBuf = charSequenceToByteBuf(sequence, requestContentType);
                    return byteBodyFactory.adapt(byteBuf);
                } else {
                    return buildFormRequest(request, byteBodyFactory, r -> buildFormDataRequest(r, bodyValue));
                }
            } else if (requestContentType.equals(MediaType.MULTIPART_FORM_DATA_TYPE) && hasBody) {
                return buildFormRequest(request, byteBodyFactory, r -> buildMultipartRequest(r, body.get()));
            } else {
                ReadBuffer bodyContent;
                if (hasBody) {
                    Object bodyValue = body.get();
                    if (Publishers.isConvertibleToPublisher(bodyValue)) {
                        // reactive body: serialize each emitted item; multi-item JSON
                        // streams are lifted into a JSON array
                        boolean isSingle = Publishers.isSingle(bodyValue.getClass());
                        Publisher<?> publisher = conversionService.convert(bodyValue, Publisher.class).orElseThrow(() ->
                            new IllegalArgumentException("Unconvertible reactive type: " + bodyValue)
                        );
                        Flux<HttpContent> requestBodyPublisher = Flux.from(publisher).map(value -> {
                            Argument<Object> type = Argument.ofInstance(value);
                            ByteBuffer<?> buffer = handlerRegistry.getWriter(type, List.of(requestContentType))
                                .writeTo(type, requestContentType, value, request.getHeaders(), byteBufferFactory);
                            return new DefaultHttpContent(((ByteBuf) buffer.asNativeBuffer()));
                        });
                        if (!isSingle && MediaType.APPLICATION_JSON_TYPE.equals(requestContentType)) {
                            requestBodyPublisher = JsonSubscriber.lift(requestBodyPublisher);
                        }
                        return byteBodyFactory.adapt(requestBodyPublisher.map(ByteBufHolder::content), nettyRequestBuilder.toHttpRequestWithoutBody().headers(), null);
                    } else if (bodyValue instanceof CharSequence sequence) {
                        bodyContent = charSequenceToByteBuf(sequence, requestContentType);
                    } else {
                        // arbitrary object: serialize via the message-body writer registry
                        Argument<Object> type = Argument.ofInstance(bodyValue);
                        ByteBuffer<?> buffer = handlerRegistry.getWriter(type, List.of(requestContentType))
                            .writeTo(type, requestContentType, bodyValue, request.getHeaders(), byteBufferFactory);
                        bodyContent = byteBodyFactory.readBufferFactory().adapt(buffer);
                    }
                } else {
                    bodyContent = byteBodyFactory.readBufferFactory().createEmpty();
                }
                return byteBodyFactory.adapt(bodyContent);
            }
        } else {
            return NettyByteBodyFactory.empty();
        }
    }

    // True for methods that REQUIRE a request body (POST/PUT/PATCH).
    private static boolean requiresRequestBody(HttpMethod method) {
        return method != null && (method.equals(HttpMethod.POST) || method.equals(HttpMethod.PUT) || method.equals(HttpMethod.PATCH));
    }

    // True for methods that PERMIT a request body (the required ones plus OPTIONS/DELETE).
    private static boolean permitsRequestBody(HttpMethod method) {
        return method != null && (requiresRequestBody(method)
            || method.equals(HttpMethod.OPTIONS)
            || method.equals(HttpMethod.DELETE)
        );
    }

    // Completes the flow exceptionally, logging (not throwing) if it already completed.
    private void completeExceptionallySafe(DelayedExecutionFlow<?> flow, Throwable exc) {
        if (!flow.tryCompleteExceptionally(exc)) {
            log.debug("Client exception suppressed because response flow already completed", exc);
        }
    }

    // When a custom error type is configured, buffers the streamed error-response body
    // so the HttpClientResponseException carries a decodable full response.
    private ExecutionFlow<HttpResponse<?>> readBodyOnError(@Nullable Argument<?> errorType, @NonNull ExecutionFlow<HttpResponse<?>> publisher) {
        if (errorType != null && errorType != HttpClient.DEFAULT_ERROR_TYPE) {
            return publisher.onErrorResume(clientException -> {
                if (clientException instanceof HttpClientResponseException exception) {
                    final HttpResponse<?> response = exception.getResponse();
                    if (response instanceof NettyStreamedHttpResponse<?> streamedResponse) {
                        DelayedExecutionFlow<HttpResponse<?>> delayed = DelayedExecutionFlow.create();
                        final StreamedHttpResponse nettyResponse = streamedResponse.getNettyResponse();
                        // accumulate the streamed chunks into one composite buffer
                        nettyResponse.subscribe(new Subscriber<>() {
                            final CompositeByteBuf buffer = byteBufferFactory.getNativeAllocator().compositeBuffer();
                            Subscription s;

                            @Override
                            public void onSubscribe(Subscription s) {
                                this.s = s;
                                s.request(1);
                            }

                            @Override
                            public void
                            onNext(HttpContent httpContent) {
                                buffer.addComponent(true, httpContent.content());
                                s.request(1);
                            }

                            @Override
                            public void onError(Throwable t) {
                                // release the partially accumulated body before failing
                                buffer.release();
                                completeExceptionallySafe(delayed, t);
                            }

                            @Override
                            public void onComplete() {
                                try {
                                    FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(nettyResponse.protocolVersion(), nettyResponse.status(), buffer, nettyResponse.headers(), new DefaultHttpHeaders(true));
                                    final FullNettyClientHttpResponse<Object> fullNettyClientHttpResponse = new FullNettyClientHttpResponse<>(fullHttpResponse, handlerRegistry, (Argument<Object>) errorType, true, conversionService);
                                    // re-raise with the now fully buffered error body attached
                                    completeExceptionallySafe(delayed, decorate(new HttpClientResponseException(
                                        fullHttpResponse.status().reasonPhrase(),
                                        null,
                                        fullNettyClientHttpResponse,
                                        new HttpClientErrorDecoder() {
                                            @Override
                                            public Argument<?> getErrorType(MediaType mediaType) {
                                                return errorType;
                                            }
                                        }
                                    )));
                                } finally {
                                    buffer.release();
                                }
                            }
                        });
                        return delayed;
                    }
                }
                return ExecutionFlow.error(clientException);
            });
        }
        return publisher;
    }

    // Resolves a relative request URI through the load balancer, applying any
    // AUTHORIZATION_INFO metadata from the selected service instance.
    private <I> ExecutionFlow<URI> resolveURI(io.micronaut.http.HttpRequest<I> request, boolean includeContextPath) {
        URI requestURI = request.getUri();
        if (loadBalancer == null) {
            return ExecutionFlow.error(decorate(new NoHostException("Request URI specifies no host to connect to")));
        }
        ExecutionFlow<ServiceInstance> selected;
        if (loadBalancer instanceof FixedLoadBalancer fixed) {
            selected = ExecutionFlow.just(fixed.getServiceInstance());
        } else {
            selected = ReactiveExecutionFlow.fromPublisher(loadBalancer.select(getLoadBalancerDiscriminator()));
        }
        return selected.map(server -> {
                Optional<String> authInfo = server.getMetadata().get(io.micronaut.http.HttpHeaders.AUTHORIZATION_INFO, String.class);
                if (request instanceof MutableHttpRequest<?> httpRequest && authInfo.isPresent()) {
                    httpRequest.getHeaders().auth(authInfo.get());
                }

                try {
                    return server.resolve(includeContextPath ? ContextPathUtils.prepend(requestURI, contextPath) : requestURI);
                } catch (URISyntaxException e) {
                    throw decorate(new HttpClientException("Failed to construct the request URI", e));
                }
            }
        );
    }

    // Maps error-status (>= 400) streamed responses to an exception when failOnError
    // is set, otherwise passes the response through unchanged.
    private <R extends HttpResponse<?>> ExecutionFlow<R> handleStreamHttpError(
        R response,
        boolean failOnError
    ) {
        boolean errorStatus = response.code() >= 400;
        if (errorStatus && failOnError) {
            // todo: close response properly
            // NOTE(review): on the error path the streamed response is not closed here,
            // which the existing todo above already flags — potential resource leak.
            return ExecutionFlow.error(decorate(new HttpClientResponseException(response.reason(), response)));
        } else {
            return ExecutionFlow.just(response);
        }
    }

    // Raw exchange variant taking an explicit byte body; the body is closed when the
    // flow terminates, or immediately if building the flow itself fails.
    @Override
    public Publisher<? extends HttpResponse<?>> exchange(io.micronaut.http.HttpRequest<?> request, @Nullable CloseableByteBody requestBody, @Nullable Thread blockedThread) {
        if (requestBody == null) {
            requestBody = NettyByteBodyFactory.empty();
        }

        PropagatedContext propagatedContext = PropagatedContext.getOrEmpty();
        ExecutionFlow<HttpResponse<?>> mono = null;
        try {
            mono = sendRequestWithRedirects(
                propagatedContext,
                blockedThread == null ? null : new BlockHint(blockedThread, null),
                new RawHttpRequestWrapper<>(conversionService, request.toMutableRequest(), requestBody),
                (req, resp) -> ExecutionFlow.just(resp)
            );
        } finally {
            if (mono == null) {
                requestBody.close();
            }
        }
        return toMono(mono, propagatedContext).doOnTerminate(requestBody::close);
    }

    // Overload without a preferred-scheduler reference; allocates a fresh holder.
    private ExecutionFlow<HttpResponse<?>> sendRequestWithRedirects(
        PropagatedContext propagatedContext,
        @Nullable BlockHint blockHint,
        MutableHttpRequest<?> request,
        BiFunction<MutableHttpRequest<?>, NettyClientByteBodyResponse, ? extends ExecutionFlow<? extends HttpResponse<?>>> readResponse
    ) {
        return sendRequestWithRedirects(propagatedContext, new AtomicReference<>(), blockHint, request, readResponse);
    }

    /**
     * This is the high-level request method. It sits above {@link #sendRawRequest} and handles
     * things like filters, error handling, response parsing, request writing.
     *
     * @param propagatedContext  The context propagated from the original client call
     * @param preferredScheduler A reference holding the preferred scheduler for timeouts. This is
     *                           replaced by the connection event loop ASAP so that callers can take
     *                           advantage of locality
     * @param blockHint          The optional block hint
     * @param request            The request to send. Must have resolved absolute URI (see {@link #resolveURI})
     * @param readResponse       Function that reads the response from the raw
     *                           {@link NettyClientByteBodyResponse} representation. This is run exactly
     *                           once, but if there is a redirect, it potentially runs with a different
     *                           request than the original (which is why it has a request parameter)
     * @return A mono containing the response
     */
    private ExecutionFlow<HttpResponse<?>> sendRequestWithRedirects(
        PropagatedContext propagatedContext,
        AtomicReference<ScheduledExecutorService> preferredScheduler,
        @Nullable BlockHint blockHint,
        MutableHttpRequest<?> request,
        BiFunction<MutableHttpRequest<?>, NettyClientByteBodyResponse, ? extends ExecutionFlow<? extends HttpResponse<?>>> readResponse
    ) {
        if (informationalServiceId != null && BasicHttpAttributes.getServiceId(request).isEmpty()) {
            ClientAttributes.setServiceId(request, informationalServiceId);
        }
        // resolve and run client filters around the actual send
        List<GenericHttpFilter> filters =
            filterResolver.resolveFilters(request, clientFilterEntries);
        FilterRunner.sortReverse(filters);

        FilterRunner runner = new FilterRunner(filters) {
            @Override
            protected ExecutionFlow<HttpResponse<?>> provideResponse(io.micronaut.http.HttpRequest<?> request, PropagatedContext propagatedContext) {
                try {
                    try (PropagatedContext.Scope ignore = propagatedContext.propagate()) {
                        return sendRequestWithRedirectsNoFilter(
                            propagatedContext,
                            preferredScheduler,
                            blockHint,
                            MutableHttpRequestWrapper.wrapIfNecessary(conversionService, request),
                            readResponse
                        );
                    }
                } catch (Throwable e) {
                    return ExecutionFlow.error(e);
                }
            }
        };
        return runner.run(request, propagatedContext);
    }

    // Connects, writes the request, and either follows a redirect (recursing through
    // the filtered entry point) or hands the raw response to readResponse.
    private ExecutionFlow<HttpResponse<?>> sendRequestWithRedirectsNoFilter(
        PropagatedContext propagatedContext,
        AtomicReference<ScheduledExecutorService> preferredScheduler,
        @Nullable BlockHint blockHint,
        MutableHttpRequest<?> request,
        BiFunction<MutableHttpRequest<?>, NettyClientByteBodyResponse, ? extends ExecutionFlow<? extends HttpResponse<?>>> readResponse
    ) {
        RequestKey requestKey;
        try {
            requestKey = new RequestKey(this, request.getUri());
        } catch (Exception e) {
            return ExecutionFlow.error(e);
        }

        if (!isRunning()) {
            return ExecutionFlow.error(decorate(new HttpClientException("The client is closed, unable to send request.")));
        }

        // first: connect
        return connectionManager.connect(requestKey, blockHint, preferredScheduler)
            .flatMap(poolHandle -> {
                poolHandle.touch();
                // prefer the connection's event loop for subsequent timeout scheduling
                preferredScheduler.set(poolHandle.channel.eventLoop());

                // build the raw request
                request.setAttribute(NettyClientHttpRequest.CHANNEL, poolHandle.channel);

                URI requestURI = request.getUri();
                boolean permitsBody = io.micronaut.http.HttpMethod.permitsRequestBody(request.getMethod());
                CloseableByteBody byteBody;
                try {
                    byteBody = buildNettyRequest(
                        request,
                        requestURI,
                        request
                            .getContentType()
                            .orElse(MediaType.APPLICATION_JSON_TYPE),
                        permitsBody,
                        poolHandle.channel
                    );
                } catch (HttpPostRequestEncoder.ErrorDataEncoderException e) {
                    // body encoding failed: return the connection before erroring
                    poolHandle.release();
                    return ExecutionFlow.error(e);
                }

                // send the raw request
                return sendRawRequest(poolHandle, request, byteBody);
            })
            .flatMap(byteBodyResponse -> {
                // handle redirects or map the response bytes
                int code = byteBodyResponse.code();
                HttpHeaders nettyHeaders = byteBodyResponse.getHeaders().getNettyHeaders();
                if (code > 300 && code < 400 && configuration.isFollowRedirects() && nettyHeaders.contains(HttpHeaderNames.LOCATION)) {
                    byteBodyResponse.close();

                    String location = nettyHeaders.get(HttpHeaderNames.LOCATION);
                    MutableHttpRequest<Object> redirectRequest;
                    // 307/308 preserve the method and body; other redirects become GET
                    if (code == 307 || code == 308) {
                        redirectRequest = io.micronaut.http.HttpRequest.create(request.getMethod(), location);
                        request.getBody().ifPresent(redirectRequest::body);
                    } else {
                        redirectRequest = io.micronaut.http.HttpRequest.GET(location);
                    }

                    setRedirectHeaders(request, redirectRequest);
                    return resolveRedirectURI(request, redirectRequest)
                        .flatMap(uri -> sendRequestWithRedirects(propagatedContext, blockHint,
                        redirectRequest.uri(uri), readResponse));
                } else {
                    io.micronaut.http.HttpHeaders headers = byteBodyResponse.getHeaders();
                    if (log.isTraceEnabled()) {
                        log.trace("HTTP Client Response Received ({}) for Request: {} {}",
                            byteBodyResponse.code(), request.getMethodName(), request.getUri());
                        HttpHeadersUtil.trace(log, headers.names(), headers::getAll);
                    }
                    return readResponse.apply(request, byteBodyResponse);
                }
            });
    }

    /**
     * This is the low-level request method, without redirect handling and with raw body bytes.
     *
     * @param poolHandle The pool handle to send the request on
     * @param request    The request to send
     * @param byteBody   The request body
     * @return A mono containing the response
     */
    private ExecutionFlow<NettyClientByteBodyResponse> sendRawRequest(
        ConnectionManager.PoolHandle poolHandle,
        io.micronaut.http.HttpRequest<?> request,
        CloseableByteBody byteBody
    ) {
        poolHandle.touch();

        URI uri = request.getUri();
        // request target is path (+ optional query) only; the Host header carries the authority
        String uriWithoutHost = uri.getRawPath();
        if (uri.getRawQuery() != null) {
            uriWithoutHost += "?" + uri.getRawQuery();
        }
        HttpRequest nettyRequest = NettyHttpRequestBuilder.asBuilder(request)
            .toHttpRequestWithoutBody()
            .setUri(uriWithoutHost);

        DelayedExecutionFlow<NettyClientByteBodyResponse> flow = DelayedExecutionFlow.create();
        // need to run the create() on the event loop so that pipeline modification happens synchronously
        if (poolHandle.channel.eventLoop().inEventLoop()) {
            sendRawRequest0(poolHandle, request, byteBody, flow, nettyRequest);
        } else {
            poolHandle.channel.eventLoop().execute(() -> sendRawRequest0(poolHandle, request, byteBody, flow, nettyRequest));
        }
        return flow;
    }

    // Event-loop-confined half of sendRawRequest: installs the response handler (and
    // stream writer if needed) into the pipeline and writes the request.
    private void sendRawRequest0(ConnectionManager.PoolHandle poolHandle, io.micronaut.http.HttpRequest<?> request, CloseableByteBody byteBody, DelayedExecutionFlow<NettyClientByteBodyResponse> sink, HttpRequest nettyRequest) {
        if (log.isDebugEnabled()) {
            log.debug("Sending HTTP {} to {}", request.getMethodName(), request.getUri());
        }

        boolean expectContinue = HttpUtil.is100ContinueExpected(nettyRequest);
        ChannelPipeline pipeline = poolHandle.channel.pipeline();
        OptionalLong length = byteBody.expectedLength();

        // if the body is streamed, we have a StreamWriter, otherwise we have a ByteBuf.
        StreamWriter streamWriter;
        ByteBuf byteBuf;
        if (byteBody instanceof AvailableByteBody available) {
            byteBuf = NettyByteBodyFactory.toByteBuf(available);
            streamWriter = null;
        } else {
            streamWriter = new StreamWriter(new NettyByteBodyFactory(poolHandle.channel()).toStreaming(byteBody), e -> {
                // a write failure taints the connection so it is not reused
                poolHandle.taint();
                completeExceptionallySafe(sink, e);
            });
            pipeline.addLast(streamWriter);
            byteBuf = null;
        }

        if (log.isTraceEnabled()) {
            HttpHeadersUtil.trace(log, nettyRequest.headers().names(), nettyRequest.headers()::getAll);
            if (byteBuf != null) {
                traceBody("Request", byteBuf);
            }
        }

        pipeline.addLast(ChannelPipelineCustomizer.HANDLER_MICRONAUT_HTTP_RESPONSE, new Http1ResponseHandler(new Http1ResponseHandler.ResponseListener() {
            // tracks whether the body write is still deferred behind a 100 Continue
            boolean stillExpectingContinue = expectContinue;

            @Override
            public void fail(ChannelHandlerContext ctx, Throwable cause) {
                poolHandle.taint();
                completeExceptionallySafe(sink, handleResponseError(request, cause));
            }

            @Override
            public void continueReceived(ChannelHandlerContext ctx) {
                if (stillExpectingContinue) {
                    stillExpectingContinue = false;
                    // server accepted: now write the deferred body
                    if (streamWriter == null) {
                        ctx.writeAndFlush(new DefaultLastHttpContent(byteBuf), ctx.voidPromise());
                    } else {
                        streamWriter.startWriting();
                    }
                }
            }

            @Override
            public void complete(io.netty.handler.codec.http.HttpResponse response, CloseableByteBody body) {
                if (!HttpUtil.isKeepAlive(response)) {
                    poolHandle.taint();
                }
                sink.complete(new NettyClientByteBodyResponse(response, body, conversionService));
            }

            @Override
            public BodySizeLimits sizeLimits() {
                return DefaultHttpClient.this.sizeLimits();
            }

            @Override
            public boolean isHeadResponse() {
                return nettyRequest.method().equals(HttpMethod.HEAD);
            }

            @Override
            public void finish(ChannelHandlerContext ctx) {
                ctx.pipeline().remove(ChannelPipelineCustomizer.HANDLER_MICRONAUT_HTTP_RESPONSE);
                if (streamWriter != null) {
                    if (!streamWriter.isCompleted()) {
                        // if there was an error, and we didn't fully write the request yet, the
                        // connection cannot be reused
                        poolHandle.taint();
                    }
                    ctx.pipeline().remove(streamWriter);
                }
                // body never written because the 100 Continue never arrived: release it
                if (stillExpectingContinue && byteBuf != null) {
                    byteBuf.release();
                }
                poolHandle.release();
            }
        }));
        poolHandle.notifyRequestPipelineBuilt();

        // fix up framing headers according to the (known or unknown) body length
        HttpHeaders headers = nettyRequest.headers();
        if (length.isPresent()) {
            headers.remove(HttpHeaderNames.TRANSFER_ENCODING);
            if (length.getAsLong() != 0 || permitsRequestBody(nettyRequest.method())) {
                headers.set(HttpHeaderNames.CONTENT_LENGTH, length.getAsLong());
            }
        } else {
            headers.remove(HttpHeaderNames.CONTENT_LENGTH);
            headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
        }

        if (!poolHandle.http2) {
            if (poolHandle.canReturn()) {
                nettyRequest.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
            } else {
                nettyRequest.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
            }
        }

        Channel channel = poolHandle.channel();
        if (streamWriter == null) {
            if (!expectContinue) {
                // it's a bit more efficient to use a full request for HTTP/2
                channel.writeAndFlush(new DefaultFullHttpRequest(
                    nettyRequest.protocolVersion(),
                    nettyRequest.method(),
                    nettyRequest.uri(),
                    byteBuf,
                    nettyRequest.headers(),
                    EmptyHttpHeaders.INSTANCE
                ), channel.voidPromise());
            } else {
                // headers only; body is written in continueReceived
                channel.writeAndFlush(nettyRequest, channel.voidPromise());
            }
        } else {
            channel.writeAndFlush(nettyRequest, channel.voidPromise());
            if (!expectContinue) {
                streamWriter.startWriting();
            }
        }
    }

    // Encodes a CharSequence body using the content type's charset (or the default).
    private ReadBuffer charSequenceToByteBuf(CharSequence bodyValue, MediaType requestContentType) {
        return NettyReadBufferFactory.of(ByteBufAllocator.DEFAULT).copyOf(bodyValue.toString(), requestContentType.getCharset().orElse(defaultCharset));
    }

    // Builds the Host header value, appending the port only when non-default.
    private String getHostHeader(URI requestURI) {
        RequestKey requestKey = new RequestKey(this, requestURI);
        StringBuilder host = new StringBuilder(requestKey.getHost());
        int port = requestKey.getPort();
        if (port > -1 && port != 80 && port != 443) {
            host.append(":").append(port);
        }
        return host.toString();
    }

    private CloseableByteBody buildFormRequest(
        MutableHttpRequest<?> request,
        NettyByteBodyFactory bodyFactory,
        ThrowingFunction<HttpRequest, HttpPostRequestEncoder, HttpPostRequestEncoder.ErrorDataEncoderException> buildMethod
    ) throws HttpPostRequestEncoder.ErrorDataEncoderException {
        // this function acts like a wrapper around HttpPostRequestEncoder. HttpPostRequestEncoder
        // takes a request + form data and transforms it to a request + bytes. Because we only want
        // the bytes, we need to copy the data from the netty request back to the original
        // MutableHttpRequest. This is just the Content-Type header, which sometimes gets an extra
        // boundary specifier that we need.

        // build the mock netty request (only the content-type matters)
        HttpRequest nettyRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
        List<AsciiString> relevantHeaders = List.of(HttpHeaderNames.CONTENT_TYPE);
        for (AsciiString header : relevantHeaders) {
            nettyRequest.headers().add(header, request.getHeaders().getAll(header));
        }

        HttpPostRequestEncoder encoder = buildMethod.apply(nettyRequest);
        HttpRequest finalized = encoder.finalizeRequest();
        // copy back the content-type
        for (AsciiString header : relevantHeaders) {
            request.getHeaders().remove(header);
            for (String value : finalized.headers().getAll(header)) {
                request.getHeaders().add(header, value);
            }
        }
        // return the body bytes
        if (encoder.isChunked()) {
            Flux<ByteBuf> bytes = Flux.create(em -> {
                em.onRequest(n -> {
                    try {
                        while (n-- > 0) {
                            HttpContent chunk = encoder.readChunk(ByteBufAllocator.DEFAULT);
                            if (chunk == null) {
                                assert encoder.isEndOfInput();
                                em.complete();
                                break;
                            }
                            em.next(chunk.content());
                        }
                    } catch (Exception e) {
                        em.error(e);
                    }
                });
                em.onDispose(encoder::cleanFiles);
            });
            if (blockingExecutor != null && encoder.getBodyListAttributes().stream().anyMatch(d -> !(d instanceof HttpData hd) || !hd.isInMemory())) {
                // readChunk in the above code can block.
bytes = bytes.subscribeOn(Schedulers.fromExecutor(blockingExecutor)); } return bodyFactory.adapt(bytes, null, null); } else { return bodyFactory.adapt(((FullHttpRequest) finalized).content()); } } private HttpPostRequestEncoder buildFormDataRequest(HttpRequest baseRequest, Object bodyValue) throws HttpPostRequestEncoder.ErrorDataEncoderException { HttpPostRequestEncoder postRequestEncoder = new HttpPostRequestEncoder(baseRequest, false); Map<String, Object> formData; if (bodyValue instanceof Map) { //noinspection unchecked formData = (Map<String, Object>) bodyValue; } else { formData = BeanMap.of(bodyValue); } for (Map.Entry<String, Object> entry : formData.entrySet()) { Object value = entry.getValue(); if (value != null) { if (value instanceof Collection<?> collection) { for (Object val : collection) { addBodyAttribute(postRequestEncoder, entry.getKey(), val); } } else { addBodyAttribute(postRequestEncoder, entry.getKey(), value); } } } return postRequestEncoder; } private void addBodyAttribute(HttpPostRequestEncoder postRequestEncoder, String key, Object value) throws HttpPostRequestEncoder.ErrorDataEncoderException { Optional<String> converted = conversionService.convert(value, String.class); if (converted.isPresent()) { postRequestEncoder.addBodyAttribute(key, converted.get()); } } private HttpPostRequestEncoder buildMultipartRequest(HttpRequest baseRequest, Object bodyValue) throws HttpPostRequestEncoder.ErrorDataEncoderException { HttpDataFactory factory = new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE); HttpPostRequestEncoder postRequestEncoder = new HttpPostRequestEncoder(factory, baseRequest, true, CharsetUtil.UTF_8, HttpPostRequestEncoder.EncoderMode.HTML5); if (bodyValue instanceof MultipartBody.Builder builder) { bodyValue = builder.build(); } if (bodyValue instanceof MultipartBody multipartBody) { postRequestEncoder.setBodyHttpDatas(multipartBody.getData(new MultipartDataFactory<>() { @NonNull @Override public InterfaceHttpData 
createFileUpload(@NonNull String name, @NonNull String filename, @NonNull MediaType contentType, @Nullable String encoding, @Nullable Charset charset, long length) { return factory.createFileUpload( baseRequest, name, filename, contentType.toString(), encoding, charset, length ); } @NonNull @Override public InterfaceHttpData createAttribute(@NonNull String name, @NonNull String value) { return factory.createAttribute( baseRequest, name, value ); } @Override public void setContent(InterfaceHttpData fileUploadObject, Object content) throws IOException { if (fileUploadObject instanceof FileUpload fu) { if (content instanceof InputStream stream) { fu.setContent(stream); } else if (content instanceof File file) { fu.setContent(file); } else if (content instanceof byte[] bytes) { final ByteBuf buffer = Unpooled.wrappedBuffer(bytes); fu.setContent(buffer); } } } })); } else { throw new MultipartException("The type %s is not a supported type for a multipart request body".formatted(bodyValue.getClass().getName())); } return postRequestEncoder; } private void traceBody(String type, ByteBuf content) { log.trace("{} Body", type); log.trace("----"); log.trace(content.toString(defaultCharset)); log.trace("----"); } private void traceChunk(ByteBuf content) { log.trace("Sending Chunk"); log.trace("----"); log.trace(content.toString(defaultCharset)); log.trace("----"); } private static MediaTypeCodecRegistry createDefaultMediaTypeRegistry() { JsonMapper mapper = JsonMapper.createDefault(); ApplicationConfiguration configuration = new ApplicationConfiguration(); return MediaTypeCodecRegistry.of( new JsonMediaTypeCodec(mapper, configuration, null), new JsonStreamMediaTypeCodec(mapper, configuration, null) ); } private static MessageBodyHandlerRegistry createDefaultMessageBodyHandlerRegistry() { ApplicationConfiguration applicationConfiguration = new ApplicationConfiguration(); ContextlessMessageBodyHandlerRegistry registry = new ContextlessMessageBodyHandlerRegistry( 
applicationConfiguration, NettyByteBufferFactory.DEFAULT, new NettyByteBufMessageBodyHandler(), new WritableBodyWriter(applicationConfiguration) ); JsonMapper mapper = JsonMapper.createDefault(); registry.add(MediaType.APPLICATION_JSON_TYPE, new NettyJsonHandler<>(mapper)); registry.add(MediaType.APPLICATION_JSON_TYPE, new CharSequenceBodyWriter(StandardCharsets.UTF_8)); registry.add(MediaType.APPLICATION_JSON_STREAM_TYPE, new NettyJsonStreamHandler<>(mapper)); return registry; } static boolean isSecureScheme(String scheme) { // fast path if (scheme.equals("http")) { return false; } if (scheme.equals("https")) { return true; } // actual case-insensitive check return io.micronaut.http.HttpRequest.SCHEME_HTTPS.equalsIgnoreCase(scheme) || SCHEME_WSS.equalsIgnoreCase(scheme); } private <E extends HttpClientException> E decorate(E exc) { return HttpClientExceptionUtils.populateServiceId(exc, informationalServiceId, configuration); } private @NonNull HttpClientException handleResponseError(io.micronaut.http.HttpRequest<?> finalRequest, Throwable cause) { String message = cause.getMessage(); if (message == null) { message = cause.getClass().getSimpleName(); } if (log.isTraceEnabled()) { log.trace("HTTP Client exception ({}) occurred for request : {} {}", message, finalRequest.getMethodName(), finalRequest.getUri()); } HttpClientException result; if (cause instanceof io.micronaut.http.exceptions.ContentLengthExceededException clee) { result = decorate(new ContentLengthExceededException(clee.getMessage())); } else if (cause instanceof BufferLengthExceededException blee) { result = decorate(new ContentLengthExceededException(blee.getAdvertisedLength(), blee.getReceivedLength())); } else if (cause instanceof io.netty.handler.timeout.ReadTimeoutException) { result = ReadTimeoutException.TIMEOUT_EXCEPTION; } else if (cause instanceof HttpClientException hce) { result = decorate(hce); } else { result = decorate(new HttpClientException("Error occurred reading HTTP response: " + 
message, cause)); } return result; } private static void setRedirectHeaders(io.micronaut.http.@Nullable HttpRequest<?> request, MutableHttpRequest<Object> redirectRequest) { if (request != null) { for (Map.Entry<String, List<String>> originalHeader : request.getHeaders()) { if (!REDIRECT_HEADER_BLOCKLIST.contains(originalHeader.getKey())) { final List<String> originalHeaderValue = originalHeader.getValue(); if (originalHeaderValue != null && !originalHeaderValue.isEmpty()) { for (String value : originalHeaderValue) { if (value != null) { redirectRequest.header(originalHeader.getKey(), value); } } } } } } } private BodySizeLimits sizeLimits() { return new BodySizeLimits(Long.MAX_VALUE, configuration.getMaxContentLength()); } private static <O, E> boolean shouldConvertWithBodyType(io.netty.handler.codec.http.HttpResponse msg, HttpClientConfiguration configuration, Argument<O> bodyType, Argument<E> errorType) { if (msg.status().code() < 400) { return true; } return !configuration.isExceptionOnErrorStatus() && bodyType.equalsType(errorType); } /** * Create a {@link HttpClientResponseException} if parsing of the HTTP error body failed. */ private HttpClientResponseException makeErrorBodyParseError(FullHttpResponse fullResponse, Throwable t) { FullNettyClientHttpResponse<Object> errorResponse = new FullNettyClientHttpResponse<>( fullResponse, handlerRegistry, null, false, conversionService ); return decorate(new HttpClientResponseException( "Error decoding HTTP error response body: " + t.getMessage(), t, errorResponse, null )); } /** * Create a {@link HttpClientResponseException} from a response with a failed HTTP status. 
*/ private HttpClientResponseException makeErrorFromRequestBody(Argument<?> errorType, HttpResponseStatus status, FullNettyClientHttpResponse<?> response) { if (errorType != null && errorType != HttpClient.DEFAULT_ERROR_TYPE) { return decorate(new HttpClientResponseException( status.reasonPhrase(), null, response, new HttpClientErrorDecoder() { @Override public Argument<?> getErrorType(MediaType mediaType) { return errorType; } } )); } else { return decorate(new HttpClientResponseException(status.reasonPhrase(), response)); } } private static boolean hasBody(HttpResponse<?> response) { if (response.code() >= HttpStatus.CONTINUE.getCode() && response.code() < HttpStatus.OK.getCode()) { return false; } if (response.code() == HttpResponseStatus.NO_CONTENT.code() || response.code() == HttpResponseStatus.NOT_MODIFIED.code()) { return false; } OptionalLong contentLength = response.getHeaders().contentLength(); return contentLength.isEmpty() || contentLength.getAsLong() != 0; } /** * Key used for connection pooling and determining host/port. */ public static final
DefaultHttpClient
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/util/Lists_newArrayList_withIterator_Test.java
{ "start": 972, "end": 1680 }
class ____ { @Test void should_return_List_containing_all_elements_in_iterator() { String[] expected = { "One", "Two" }; Iterator<String> elements = asList(expected).iterator(); ArrayList<String> list = Lists.newArrayList(elements); assertThat(list).containsExactly(expected); } @Test void should_return_null_if_iterator_is_null() { Iterator<?> elements = null; assertThat(Lists.newArrayList(elements)).isNull(); } @Test void should_return_empty_List_if_iterator_is_empty() { Iterator<String> elements = Collections.emptyIterator(); ArrayList<String> list = Lists.newArrayList(elements); assertThat(list).isEmpty(); } }
Lists_newArrayList_withIterator_Test
java
google__guava
android/guava/src/com/google/common/collect/ArrayTable.java
{ "start": 5859, "end": 9033 }
enum ____ during * serialization and deserialization. */ /** * Creates an {@code ArrayTable} with the mappings in the provided table. * * <p>If {@code table} includes a mapping with row key {@code r} and a separate mapping with * column key {@code c}, the returned table contains a mapping with row key {@code r} and column * key {@code c}. If that row key / column key pair in not in {@code table}, the pair maps to * {@code null} in the generated table. * * <p>The returned table allows subsequent {@code put} calls with the row keys in {@code * table.rowKeySet()} and the column keys in {@code table.columnKeySet()}. Calling {@link #put} * with other keys leads to an {@code IllegalArgumentException}. * * <p>The ordering of {@code table.rowKeySet()} and {@code table.columnKeySet()} determines the * row and column iteration ordering of the returned table. * * @throws NullPointerException if {@code table} has a null key */ @SuppressWarnings("unchecked") // TODO(cpovirk): Make constructor accept wildcard types? public static <R, C, V> ArrayTable<R, C, V> create(Table<R, C, ? extends @Nullable V> table) { return (table instanceof ArrayTable) ? new ArrayTable<R, C, V>((ArrayTable<R, C, V>) table) : new ArrayTable<R, C, V>(table); } private final ImmutableList<R> rowList; private final ImmutableList<C> columnList; // TODO(jlevy): Add getters returning rowKeyToIndex and columnKeyToIndex? private final ImmutableMap<R, Integer> rowKeyToIndex; private final ImmutableMap<C, Integer> columnKeyToIndex; private final @Nullable V[][] array; private ArrayTable(Iterable<? extends R> rowKeys, Iterable<? extends C> columnKeys) { this.rowList = ImmutableList.copyOf(rowKeys); this.columnList = ImmutableList.copyOf(columnKeys); checkArgument(rowList.isEmpty() == columnList.isEmpty()); /* * TODO(jlevy): Support only one of rowKey / columnKey being empty? 
If we * do, when columnKeys is empty but rowKeys isn't, rowKeyList() can contain * elements but rowKeySet() will be empty and containsRow() won't * acknowledge them. */ rowKeyToIndex = Maps.indexMap(rowList); columnKeyToIndex = Maps.indexMap(columnList); @SuppressWarnings("unchecked") @Nullable V[][] tmpArray = (@Nullable V[][]) new Object[rowList.size()][columnList.size()]; array = tmpArray; // Necessary because in GWT the arrays are initialized with "undefined" instead of null. eraseAll(); } private ArrayTable(Table<R, C, ? extends @Nullable V> table) { this(table.rowKeySet(), table.columnKeySet()); putAll(table); } private ArrayTable(ArrayTable<R, C, V> table) { rowList = table.rowList; columnList = table.columnList; rowKeyToIndex = table.rowKeyToIndex; columnKeyToIndex = table.columnKeyToIndex; @SuppressWarnings("unchecked") @Nullable V[][] copy = (@Nullable V[][]) new Object[rowList.size()][columnList.size()]; array = copy; for (int i = 0; i < rowList.size(); i++) { arraycopy(table.array[i], 0, copy[i], 0, table.array[i].length); } } private abstract static
sizes
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java
{ "start": 19793, "end": 20425 }
class ____ implements HeartbeatStore { private final AtomicReference<Heartbeat> hearbeatRef; SharedHeartbeatStore(AtomicReference<Heartbeat> hearbeatRef) { this.hearbeatRef = hearbeatRef; } @Override public void writeHeartbeat(Heartbeat newHeartbeat, ActionListener<Void> listener) { hearbeatRef.set(newHeartbeat); listener.onResponse(null); } @Override public void readLatestHeartbeat(ActionListener<Heartbeat> listener) { listener.onResponse(hearbeatRef.get()); } } private static
SharedHeartbeatStore
java
FasterXML__jackson-databind
src/main/java/tools/jackson/databind/ValueSerializer.java
{ "start": 16292, "end": 16349 }
class ____ extends ValueSerializer<Object> { } }
None
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/cfg/persister/GoofyPersisterClassProvider.java
{ "start": 24871, "end": 34331 }
class ____ implements CollectionPersister { public NoopCollectionPersister( Collection collectionBinding, CollectionDataAccess cacheAccessStrategy, RuntimeModelCreationContext creationContext) { throw new GoofyException(NoopCollectionPersister.class); } @Override public NavigableRole getNavigableRole() { return null; } public void initialize(Object key, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public boolean hasCache() { return false; //To change body of implemented methods use File | Settings | File Templates. } public CollectionDataAccess getCacheAccessStrategy() { return null; //To change body of implemented methods use File | Settings | File Templates. } public CacheEntryStructure getCacheEntryStructure() { return null; //To change body of implemented methods use File | Settings | File Templates. } @Override public boolean useShallowQueryCacheLayout() { return false; } public CollectionType getCollectionType() { throw new UnsupportedOperationException(); } public Type getKeyType() { return null; //To change body of implemented methods use File | Settings | File Templates. } public Type getIndexType() { return null; //To change body of implemented methods use File | Settings | File Templates. } public Type getElementType() { return null; //To change body of implemented methods use File | Settings | File Templates. } public Class<?> getElementClass() { return null; //To change body of implemented methods use File | Settings | File Templates. } public boolean isPrimitiveArray() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isArray() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isOneToMany() { return false; //To change body of implemented methods use File | Settings | File Templates. 
} public boolean isManyToMany() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean hasIndex() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isLazy() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isInverse() { return false; //To change body of implemented methods use File | Settings | File Templates. } public void remove(Object id, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public void recreate(PersistentCollection<?> collection, Object key, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public void deleteRows(PersistentCollection<?> collection, Object key, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public void updateRows(PersistentCollection<?> collection, Object key, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public void insertRows(PersistentCollection<?> collection, Object key, SharedSessionContractImplementor session) throws HibernateException { //To change body of implemented methods use File | Settings | File Templates. } public String getRole() { return null; //To change body of implemented methods use File | Settings | File Templates. } public EntityPersister getOwnerEntityPersister() { return null; //To change body of implemented methods use File | Settings | File Templates. } public IdentifierGenerator getIdentifierGenerator() { return null; //To change body of implemented methods use File | Settings | File Templates. 
} public Type getIdentifierType() { return null; //To change body of implemented methods use File | Settings | File Templates. } public boolean hasOrphanDelete() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean hasOrdering() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean hasManyToManyOrdering() { return false; //To change body of implemented methods use File | Settings | File Templates. } public String[] getCollectionSpaces() { return new String[0]; //To change body of implemented methods use File | Settings | File Templates. } public boolean isCascadeDeleteEnabled() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isVersioned() { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean isMutable() { return false; //To change body of implemented methods use File | Settings | File Templates. } public void postInstantiate() throws MappingException { //To change body of implemented methods use File | Settings | File Templates. } public SessionFactoryImplementor getFactory() { return null; //To change body of implemented methods use File | Settings | File Templates. } public boolean isAffectedByEnabledFilters(SharedSessionContractImplementor session) { return false; //To change body of implemented methods use File | Settings | File Templates. } public String[] getKeyColumnAliases(String suffix) { return new String[0]; //To change body of implemented methods use File | Settings | File Templates. } public String[] getIndexColumnAliases(String suffix) { return new String[0]; //To change body of implemented methods use File | Settings | File Templates. } public String[] getElementColumnAliases(String suffix) { return new String[0]; //To change body of implemented methods use File | Settings | File Templates. 
} public String getIdentifierColumnAlias(String suffix) { return null; //To change body of implemented methods use File | Settings | File Templates. } public int getSize(Object key, SharedSessionContractImplementor session) { return 0; //To change body of implemented methods use File | Settings | File Templates. } public boolean indexExists(Object key, Object index, SharedSessionContractImplementor session) { return false; //To change body of implemented methods use File | Settings | File Templates. } public boolean elementExists(Object key, Object element, SharedSessionContractImplementor session) { return false; //To change body of implemented methods use File | Settings | File Templates. } public Object getElementByIndex(Object key, Object index, SharedSessionContractImplementor session, Object owner) { return null; //To change body of implemented methods use File | Settings | File Templates. } @Override public String getMappedByProperty() { return null; } @Override public Comparator<?> getSortingComparator() { return null; } @Override public CollectionSemantics<?,?> getCollectionSemantics() { return null; } @Override public void applyBaseManyToManyRestrictions(Consumer<Predicate> predicateConsumer, TableGroup tableGroup, boolean useQualifier, Map<String, Filter> enabledFilters, Set<String> treatAsDeclarations, SqlAstCreationState creationState) { } @Override public void processQueuedOps(PersistentCollection<?> collection, Object key, SharedSessionContractImplementor session) throws HibernateException { } @Override public void applyFilterRestrictions( Consumer<Predicate> predicateConsumer, TableGroup tableGroup, boolean useQualifier, Map<String, Filter> enabledFilters, boolean onlyApplyLoadByKeyFilters, SqlAstCreationState creationState) { } @Override public void applyBaseRestrictions(Consumer<Predicate> predicateConsumer, TableGroup tableGroup, boolean useQualifier, Map<String, Filter> enabledFilters, Set<String> treatAsDeclarations, SqlAstCreationState 
creationState) { } @Override public void applyBaseRestrictions( Consumer<Predicate> predicateConsumer, TableGroup tableGroup, boolean useQualifier, Map<String, Filter> enabledFilters, boolean onlyApplyLoadByKeyFilters, Set<String> treatAsDeclarations, SqlAstCreationState creationState) { } @Override public boolean hasWhereRestrictions() { return false; } @Override public void applyWhereRestrictions(Consumer<Predicate> predicateConsumer, TableGroup tableGroup, boolean useQualifier, SqlAstCreationState creationState) { } @Override public String getIdentifierColumnName() { return ""; } @Override public String getTableName() { return ""; } @Override public String selectFragment(String alias, String columnSuffix) { return ""; } @Override public String[] getCollectionPropertyColumnAliases(String propertyName, String string) { return new String[0]; } @Override public EntityPersister getElementPersister() { return null; } } }
NoopCollectionPersister
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/annotation/DirtiesContext.java
{ "start": 4480, "end": 4723 }
interface ____ { /** * The <i>mode</i> to use when a test method is annotated with * {@code @DirtiesContext}. * <p>Defaults to {@link MethodMode#AFTER_METHOD AFTER_METHOD}. * <p>Setting the method mode on an annotated test
DirtiesContext
java
mapstruct__mapstruct
processor/src/main/java/org/mapstruct/ap/internal/model/source/MethodMatcher.java
{ "start": 4913, "end": 18691 }
class ____ { private TypeFactory typeFactory; private TypeUtils typeUtils; private Method candidateMethod; private List<Type> sourceTypes; private Type targetType; GenericAnalyser(TypeFactory typeFactory, TypeUtils typeUtils, Method candidateMethod, List<Type> sourceTypes, Type targetType) { this.typeFactory = typeFactory; this.typeUtils = typeUtils; this.candidateMethod = candidateMethod; this.sourceTypes = sourceTypes; this.targetType = targetType; } Type candidateReturnType = null; List<Type> candidateParTypes; private boolean lineUp() { if ( candidateMethod.getParameters().size() != sourceTypes.size() ) { return false; } if ( !candidateMethod.getTypeParameters().isEmpty() ) { this.candidateParTypes = new ArrayList<>(); // Per generic method parameter the associated type variable candidates Map<Type, TypeVarCandidate> methodParCandidates = new HashMap<>(); // Get candidates boolean success = getCandidates( methodParCandidates ); if ( !success ) { return false; } // Check type bounds boolean withinBounds = candidatesWithinBounds( methodParCandidates ); if ( !withinBounds ) { return false; } // Represent result as map. 
Map<Type, Type> resolvedPairs = new HashMap<>(); for ( TypeVarCandidate candidate : methodParCandidates.values() ) { for ( Type.ResolvedPair pair : candidate.pairs) { resolvedPairs.put( pair.getParameter(), pair.getMatch() ); } } // Resolve parameters and return type by using the found candidates int nrOfMethodPars = candidateMethod.getParameters().size(); for ( int i = 0; i < nrOfMethodPars; i++ ) { Type candidateType = resolve( candidateMethod.getParameters().get( i ).getType(), resolvedPairs ); if ( candidateType == null ) { return false; } this.candidateParTypes.add( candidateType ); } if ( !candidateMethod.getReturnType().isVoid() ) { this.candidateReturnType = resolve( candidateMethod.getReturnType(), resolvedPairs ); if ( this.candidateReturnType == null ) { return false; } } else { this.candidateReturnType = candidateMethod.getReturnType(); } } else { this.candidateParTypes = candidateMethod.getParameters().stream() .map( Parameter::getType ) .collect( Collectors.toList() ); this.candidateReturnType = candidateMethod.getReturnType(); } return true; } /** * {@code <T, U extends Number> T map( U in ) } * * Resolves all method generic parameter candidates * * @param methodParCandidates Map, keyed by the method generic parameter (T, U extends Number), with their * respective candidates * * @return false no match or conflict has been found * */ boolean getCandidates( Map<Type, TypeVarCandidate> methodParCandidates) { int nrOfMethodPars = candidateMethod.getParameters().size(); Type returnType = candidateMethod.getReturnType(); for ( int i = 0; i < nrOfMethodPars; i++ ) { Type sourceType = sourceTypes.get( i ); Parameter par = candidateMethod.getParameters().get( i ); Type parType = par.getType(); boolean success = getCandidates( parType, sourceType, methodParCandidates ); if ( !success ) { return false; } } if ( !returnType.isVoid() ) { boolean success = getCandidates( returnType, targetType, methodParCandidates ); if ( !success ) { return false; } } return 
true; } /** * @param aCandidateMethodType parameter type or return type from candidate method * @param matchingType source type / target type to match * @param candidates Map, keyed by the method generic parameter, with the candidates * * @return false no match or conflict has been found */ boolean getCandidates(Type aCandidateMethodType, Type matchingType, Map<Type, TypeVarCandidate> candidates ) { if ( !( aCandidateMethodType.isTypeVar() || aCandidateMethodType.isArrayTypeVar() || aCandidateMethodType.isWildCardBoundByTypeVar() || hasGenericTypeParameters( aCandidateMethodType ) ) ) { // the typeFromCandidateMethod is not a generic (parameterized) type return true; } boolean foundAMatch = false; for ( Type mthdParType : candidateMethod.getTypeParameters() ) { // typeFromCandidateMethod itself is a generic type, e.g. <T> String method( T par ); // typeFromCandidateMethod is a generic arrayType e.g. <T> String method( T[] par ); // typeFromCandidateMethod is embedded in another type e.g. <T> String method( Callable<T> par ); // typeFromCandidateMethod is a wildcard, bounded by a typeVar // e.g. <T> String method( List<? extends T> in ) Type.ResolvedPair resolved = mthdParType.resolveParameterToType( matchingType, aCandidateMethodType ); if ( resolved.getMatch() == null ) { // we should be dealing with something containing a type parameter at this point. This is // covered with the checks above. Therefore resolved itself cannot be null. // If there is no match here, continue with the next candidate, perhaps there will a match with // the next method type parameter continue; } foundAMatch = true; // there is a rare case where we do not arrive here at all. 
// resolved something at this point, a candidate can be fetched or created TypeVarCandidate typeVarCandidate; if ( candidates.containsKey( mthdParType ) ) { typeVarCandidate = candidates.get( mthdParType ); } else { // add a new type typeVarCandidate = new TypeVarCandidate( ); candidates.put( mthdParType, typeVarCandidate ); } // check what we've resolved if ( resolved.getParameter().isTypeVar() ) { // it might be already set, but we just checked if its an equivalent type if ( typeVarCandidate.match == null ) { typeVarCandidate.match = resolved.getMatch(); typeVarCandidate.pairs.add( resolved ); } else if ( !areEquivalent( resolved.getMatch(), typeVarCandidate.match ) ) { // type has been resolved twice, but with a different candidate (conflict). Stop return false; } } else if ( resolved.getParameter().isArrayTypeVar() && resolved.getParameter().getComponentType().isAssignableTo( mthdParType ) ) { // e.g. <T extends Number> T map( List<T[]> in ), the match for T should be assignable // to the parameter T extends Number typeVarCandidate.pairs.add( resolved ); } else if ( resolved.getParameter().isWildCardBoundByTypeVar() && resolved.getParameter().getTypeBound().isAssignableTo( mthdParType ) ) { // e.g. <T extends Number> T map( List<? super T> in ), the match for ? super T should be assignable // to the parameter T extends Number typeVarCandidate.pairs.add( resolved ); } else { // none of the above return false; } } return foundAMatch; } /** * Checks whether all found candidates are within the bounds of the method type var. For instance * @<code><T, U extends Callable<T> U map( T in )</code>. Note that only the relation between the * match for U and Callable are checked. Not the correct parameter. * * @param methodParCandidates * * @return true when all within bounds. 
*/ private boolean candidatesWithinBounds(Map<Type, TypeVarCandidate> methodParCandidates ) { for ( Map.Entry<Type, TypeVarCandidate> entry : methodParCandidates.entrySet() ) { for ( Type bound : entry.getKey().getTypeBounds() ) { for ( Type.ResolvedPair pair : entry.getValue().pairs ) { if ( entry.getKey().hasUpperBound() ) { if ( !pair.getMatch().asRawType().isAssignableTo( bound.asRawType() ) ) { return false; } } else { // lower bound if ( !bound.asRawType().isAssignableTo( pair.getMatch().asRawType() ) ) { return false; } } } } } return true; } private boolean hasGenericTypeParameters(Type typeFromCandidateMethod) { for ( Type typeParam : typeFromCandidateMethod.getTypeParameters() ) { if ( typeParam.isTypeVar() || typeParam.isWildCardBoundByTypeVar() || typeParam.isArrayTypeVar() ) { return true; } else { if ( hasGenericTypeParameters( typeParam ) ) { return true; } } } return false; } private Type resolve( Type typeFromCandidateMethod, Map<Type, Type> pairs ) { if ( typeFromCandidateMethod.isTypeVar() || typeFromCandidateMethod.isArrayTypeVar() ) { return pairs.get( typeFromCandidateMethod ); } else if ( hasGenericTypeParameters( typeFromCandidateMethod ) ) { TypeMirror[] typeArgs = new TypeMirror[ typeFromCandidateMethod.getTypeParameters().size() ]; for ( int i = 0; i < typeFromCandidateMethod.getTypeParameters().size(); i++ ) { Type typeFromCandidateMethodTypeParameter = typeFromCandidateMethod.getTypeParameters().get( i ); if ( hasGenericTypeParameters( typeFromCandidateMethodTypeParameter ) ) { // nested type var, lets resolve some more (recur) Type matchingType = resolve( typeFromCandidateMethodTypeParameter, pairs ); if ( matchingType == null ) { // something went wrong return null; } typeArgs[i] = matchingType.getTypeMirror(); } else if ( typeFromCandidateMethodTypeParameter.isWildCardBoundByTypeVar() || typeFromCandidateMethodTypeParameter.isTypeVar() || typeFromCandidateMethodTypeParameter.isArrayTypeVar() ) { Type matchingType = pairs.get( 
typeFromCandidateMethodTypeParameter ); if ( matchingType == null ) { // something went wrong return null; } // Use the boxed equivalent for the type arguments, // because a primitive type cannot be a type argument typeArgs[i] = matchingType.getBoxedEquivalent().getTypeMirror(); } else { // it is not a type var (e.g. Map<String, T> ), String is not a type var typeArgs[i] = typeFromCandidateMethodTypeParameter.getTypeMirror(); } } DeclaredType typeArg = typeUtils.getDeclaredType( typeFromCandidateMethod.getTypeElement(), typeArgs ); return typeFactory.getType( typeArg ); } else { // its not a type var or generic parameterized (e.g. just a plain type) return typeFromCandidateMethod; } } boolean areEquivalent( Type a, Type b ) { if ( a == null || b == null ) { return false; } return a.getBoxedEquivalent().equals( b.getBoxedEquivalent() ); } } private static
GenericAnalyser
java
spring-projects__spring-framework
spring-beans/src/test/java/org/springframework/beans/ExtendedBeanInfoTests.java
{ "start": 27983, "end": 28192 }
class ____ extends A { @Override public boolean isTargetMethod() { return false; } } BeanInfo bi = Introspector.getBeanInfo(B.class); // java.beans.Introspector returns the "wrong" declaring
B
java
apache__logging-log4j2
log4j-core-test/src/main/java/org/apache/logging/log4j/core/test/junit/Tags.java
{ "start": 922, "end": 1159 }
class ____ { /** * Tests that use LMAX Disruptor. Same name as the JUnit 4 category. */ public static final String ASYNC_LOGGERS = "org.apache.logging.log4j.core.test.categories.AsyncLoggers"; private Tags() {} }
Tags
java
quarkusio__quarkus
independent-projects/bootstrap/maven-resolver/src/main/java/io/quarkus/bootstrap/BootstrapDependencyProcessingException.java
{ "start": 147, "end": 527 }
class ____ extends BootstrapMavenException { /** * */ private static final long serialVersionUID = 1L; public BootstrapDependencyProcessingException(String message, Throwable cause) { super(message, cause); } public BootstrapDependencyProcessingException(String message) { super(message); } }
BootstrapDependencyProcessingException
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
{ "start": 5245, "end": 23166 }
class ____ { protected static RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); protected static FileContext localFS; protected static File localDir; protected static File localLogDir; protected static File remoteLogDir; protected static File tmpDir; protected NodeManagerMetrics metrics = NodeManagerMetrics.create(); public BaseContainerManagerTest() throws UnsupportedFileSystemException { localFS = FileContext.getLocalFSFileContext(); localDir = new File("target", this.getClass().getSimpleName() + "-localDir") .getAbsoluteFile(); localLogDir = new File("target", this.getClass().getSimpleName() + "-localLogDir") .getAbsoluteFile(); remoteLogDir = new File("target", this.getClass().getSimpleName() + "-remoteLogDir") .getAbsoluteFile(); tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir"); } protected static Logger LOG = LoggerFactory.getLogger(BaseContainerManagerTest.class); protected static final int HTTP_PORT = 5412; protected Configuration conf = new YarnConfiguration(); protected Context context = new NMContext(new NMContainerTokenSecretManager( conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf) { public int getHttpPort() { return HTTP_PORT; } @Override public ContainerExecutor getContainerExecutor() { return exec; } @Override public NMLogAggregationStatusTracker getNMLogAggregationStatusTracker() { NMLogAggregationStatusTracker mock = mock( NMLogAggregationStatusTracker.class); doNothing().when(mock).updateLogAggregationStatus( any(ApplicationId.class), any(LogAggregationStatus.class), anyLong(), anyString(), anyBoolean()); return mock; } }; protected ContainerExecutor exec; protected DeletionService delSrvc; protected String user = "nobody"; protected NodeHealthCheckerService nodeHealthChecker; protected LocalDirsHandlerService dirsHandler; protected final long DUMMY_RM_IDENTIFIER = 1234; private NodeResourceMonitorImpl nodeResourceMonitor = 
mock( NodeResourceMonitorImpl.class); private NodeHealthCheckerService nodeHealthCheckerService; private NodeStatusUpdater nodeStatusUpdater; protected ContainerManagerImpl containerManager = null; public NodeStatusUpdater getNodeStatusUpdater() { return nodeStatusUpdater; } public void setNodeStatusUpdater( NodeStatusUpdater nodeStatusUpdater) { this.nodeStatusUpdater = nodeStatusUpdater; } protected ContainerExecutor createContainerExecutor() { DefaultContainerExecutor exec = new DefaultContainerExecutor(); exec.setConf(conf); return spy(exec); } @BeforeEach public void setup() throws IOException { localFS.delete(new Path(localDir.getAbsolutePath()), true); localFS.delete(new Path(tmpDir.getAbsolutePath()), true); localFS.delete(new Path(localLogDir.getAbsolutePath()), true); localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true); localDir.mkdir(); tmpDir.mkdir(); localLogDir.mkdir(); remoteLogDir.mkdir(); LOG.info("Created localDir in " + localDir.getAbsolutePath()); LOG.info("Created tmpDir in " + tmpDir.getAbsolutePath()); String bindAddress = "0.0.0.0:" + ServerSocketUtil.getPort(49162, 10); conf.set(YarnConfiguration.NM_ADDRESS, bindAddress); conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10)); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1); // Default delSrvc exec = createContainerExecutor(); delSrvc = createDeletionService(); delSrvc.init(conf); dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); nodeHealthCheckerService = new NodeHealthCheckerService(dirsHandler); nodeStatusUpdater = new NodeStatusUpdaterImpl( context, new AsyncDispatcher(), nodeHealthCheckerService, metrics) { @Override protected ResourceTracker getRMClient() { return new 
LocalRMInterface(); }; @Override protected void stopRMProxy() { return; } @Override protected void startStatusUpdater() { return; // Don't start any updating thread. } @Override public long getRMIdentifier() { // There is no real RM registration, simulate and set RMIdentifier return DUMMY_RM_IDENTIFIER; } }; containerManager = createContainerManager(delSrvc); ((NMContext)context).setContainerManager(containerManager); ((NMContext)context).setContainerExecutor(exec); ((NMContext)context).setNodeResourceMonitor(nodeResourceMonitor); nodeStatusUpdater.init(conf); containerManager.init(conf); nodeStatusUpdater.start(); ((NMContext)context).setNodeStatusUpdater(nodeStatusUpdater); ((NMContext)context).setContainerStateTransitionListener( new NodeManager.DefaultContainerStateListener()); } protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, metrics, dirsHandler) { @Override protected void authorizeGetAndStopContainerRequest( ContainerId containerId, Container container, boolean stopRequest, NMTokenIdentifier identifier, String remoteUser) throws YarnException { // do nothing } @Override protected void authorizeUser(UserGroupInformation remoteUgi, NMTokenIdentifier nmTokenIdentifier) { // do nothing } @Override protected void authorizeStartAndResourceIncreaseRequest( NMTokenIdentifier nmTokenIdentifier, ContainerTokenIdentifier containerTokenIdentifier, boolean startRequest) throws YarnException { // do nothing } @Override protected void updateNMTokenIdentifier( NMTokenIdentifier nmTokenIdentifier) throws InvalidToken { // Do nothing } @Override public Map<String, ByteBuffer> getAuxServiceMetaData() { Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>(); serviceData.put("AuxService1", ByteBuffer.wrap("AuxServiceMetaData1".getBytes())); serviceData.put("AuxService2", ByteBuffer.wrap("AuxServiceMetaData2".getBytes())); return serviceData; } @Override 
protected NMTokenIdentifier selectNMTokenIdentifier( UserGroupInformation remoteUgi) { return new NMTokenIdentifier(); } }; } protected DeletionService createDeletionService() { return new DeletionService(exec) { @Override public void delete(DeletionTask deletionTask) { // Don't do any deletions. LOG.info("Psuedo delete: user - " + user + ", type - " + deletionTask.getDeletionTaskType()); }; }; } @AfterEach public void tearDown() throws IOException, InterruptedException { if (containerManager != null) { containerManager.stop(); } createContainerExecutor().deleteAsUser(new DeletionAsUserContext.Builder() .setUser(user) .setSubDir(new Path(localDir.getAbsolutePath())) .setBasedirs(new Path[] {}) .build()); } public static void waitForContainerState( ContainerManagementProtocol containerManager, ContainerId containerID, ContainerState finalState) throws InterruptedException, YarnException, IOException { waitForContainerState(containerManager, containerID, Arrays.asList(finalState), 20); } public static void waitForContainerState( ContainerManagementProtocol containerManager, ContainerId containerID, ContainerState finalState, int timeOutMax) throws InterruptedException, YarnException, IOException { waitForContainerState(containerManager, containerID, Arrays.asList(finalState), timeOutMax); } public static void waitForContainerState( ContainerManagementProtocol containerManager, ContainerId containerID, List<ContainerState> finalStates, int timeOutMax) throws InterruptedException, YarnException, IOException { List<ContainerId> list = new ArrayList<ContainerId>(); list.add(containerID); GetContainerStatusesRequest request = GetContainerStatusesRequest.newInstance(list); ContainerStatus containerStatus = null; HashSet<ContainerState> fStates = new HashSet<>(finalStates); int timeoutSecs = 0; do { Thread.sleep(1000); containerStatus = containerManager.getContainerStatuses(request) .getContainerStatuses().get(0); LOG.info("Waiting for container to get into one of states " 
+ fStates + ". Current state is " + containerStatus.getState()); timeoutSecs += 1; } while (!fStates.contains(containerStatus.getState()) && timeoutSecs < timeOutMax); LOG.info("Container state is " + containerStatus.getState()); assertTrue(fStates.contains(containerStatus.getState()), "ContainerState is not correct (timedout)"); } public static void waitForApplicationState( ContainerManagerImpl containerManager, ApplicationId appID, ApplicationState finalState) throws InterruptedException { // Wait for app-finish Application app = containerManager.getContext().getApplications().get(appID); int timeout = 0; while (!(app.getApplicationState().equals(finalState)) && timeout++ < 15) { LOG.info("Waiting for app to reach " + finalState + ".. Current state is " + app.getApplicationState()); Thread.sleep(1000); } assertTrue(app.getApplicationState().equals(finalState), "App is not in " + finalState + " yet!! Timedout!!"); } public static void waitForNMContainerState(ContainerManagerImpl containerManager, ContainerId containerID, org.apache.hadoop.yarn.server.nodemanager.containermanager .container.ContainerState finalState) throws InterruptedException, YarnException, IOException { waitForNMContainerState(containerManager, containerID, finalState, 20); } public static void waitForNMContainerState(ContainerManagerImpl containerManager, ContainerId containerID, org.apache.hadoop.yarn.server.nodemanager.containermanager .container.ContainerState finalState, int timeOutMax) throws InterruptedException, YarnException, IOException { waitForNMContainerState(containerManager, containerID, Arrays.asList(finalState), timeOutMax); } public static void waitForNMContainerState(ContainerManagerImpl containerManager, ContainerId containerID, List<org.apache.hadoop.yarn.server.nodemanager.containermanager .container.ContainerState> finalStates, int timeOutMax) throws InterruptedException, YarnException, IOException { Container container = null; org.apache.hadoop.yarn.server.nodemanager 
.containermanager.container.ContainerState currentState = null; int timeoutSecs = 0; do { Thread.sleep(1000); container = containerManager.getContext().getContainers().get(containerID); if (container != null) { currentState = container.getContainerState(); } if (currentState != null) { LOG.info("Waiting for NM container to get into one of the following " + "states: " + finalStates + ". Current state is " + currentState); } timeoutSecs += 1; } while (!finalStates.contains(currentState) && timeoutSecs < timeOutMax); LOG.info("Container state is " + currentState); assertTrue(finalStates.contains(currentState), "ContainerState is not correct (timedout)"); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, NMContainerTokenSecretManager containerTokenSecretManager) throws IOException { return createContainerToken(cId, rmIdentifier, nodeId, user, containerTokenSecretManager, null); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext) throws IOException { Resource r = Resources.createResource(1024); return createContainerToken(cId, rmIdentifier, nodeId, user, r, containerTokenSecretManager, logAggregationContext); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext, ContainerType containerType) throws IOException { Resource r = Resources.createResource(1024); return createContainerToken(cId, rmIdentifier, nodeId, user, r, containerTokenSecretManager, logAggregationContext, containerType); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext 
logAggregationContext) throws IOException { ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, nodeId.toString(), user, resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null); return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager .retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext, ContainerType continerType) throws IOException { ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, nodeId.toString(), user, resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null, continerType); return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager.retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } public static Token createContainerToken(ContainerId cId, int version, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext) throws IOException { ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, version, nodeId.toString(), user, resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null, ContainerType.TASK, ExecutionType.GUARANTEED); return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager.retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, 
LogAggregationContext logAggregationContext, ExecutionType executionType) throws IOException { ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, 0, nodeId.toString(), user, resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null, ContainerType.TASK, executionType); return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager.retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } public static Token createContainerToken(ContainerId cId, int version, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext, ExecutionType executionType) throws IOException { ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, version, nodeId.toString(), user, resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null, ContainerType.TASK, executionType); return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager.retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } public static ContainerId createContainerId(int id) { // Use default appId = 0 return createContainerId(id, 0); } public static ContainerId createContainerId(int cId, int aId) { ApplicationId appId = ApplicationId.newInstance(0, aId); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = ContainerId.newContainerId(appAttemptId, cId); return containerId; } }
BaseContainerManagerTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/StringSplitterTest.java
{ "start": 4031, "end": 4345 }
class ____ { void f() { for (String s : "".split(":" + 0)) {} } } """) .addOutputLines( "Test.java", """ import com.google.common.base.Splitter; import java.util.regex.Pattern;
Test
java
apache__rocketmq
tools/src/test/java/org/apache/rocketmq/tools/command/offset/ResetOffsetByTimeOldCommandTest.java
{ "start": 1117, "end": 1920 }
class ____ { @Test public void testExecute() { ResetOffsetByTimeOldCommand cmd = new ResetOffsetByTimeOldCommand(); Options options = ServerUtil.buildCommandlineOptions(new Options()); String[] subargs = new String[] {"-g default-group", "-t unit-test", "-s 1412131213231", "-f false"}; final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser()); assertThat(commandLine.getOptionValue('g').trim()).isEqualTo("default-group"); assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test"); assertThat(commandLine.getOptionValue('s').trim()).isEqualTo("1412131213231"); } }
ResetOffsetByTimeOldCommandTest
java
elastic__elasticsearch
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ApostropheFilterFactory.java
{ "start": 877, "end": 1219 }
class ____ extends AbstractTokenFilterFactory { ApostropheFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(name); } @Override public TokenStream create(TokenStream tokenStream) { return new ApostropheFilter(tokenStream); } }
ApostropheFilterFactory
java
apache__hadoop
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
{ "start": 1613, "end": 11081 }
class ____ extends NativeAzureFileSystemBaseTest { @Override protected AzureBlobStorageTestAccount createTestAccount() throws Exception { return AzureBlobStorageTestAccount.create(); } /** * Tests the rename file operation to ensure that when there are multiple * attempts to rename a file to the same destination, only one rename * operation is successful (HADOOP-15086). */ @Test public void testMultipleRenameFileOperationsToSameDestination() throws IOException, InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final AtomicInteger successfulRenameCount = new AtomicInteger(0); final AtomicReference<IOException> unexpectedError = new AtomicReference<IOException>(); final Path dest = path("dest"); // Run 10 threads to rename multiple files to the same target path List<Thread> threads = new ArrayList<>(); for (int i = 0; i < 10; i++) { final int threadNumber = i; Path src = path("test" + threadNumber); threads.add(new SubjectInheritingThread(() -> { try { latch.await(Long.MAX_VALUE, TimeUnit.SECONDS); } catch (InterruptedException e) { } try { try (OutputStream output = fs.create(src)) { output.write(("Source file number " + threadNumber).getBytes()); } if (fs.rename(src, dest)) { LOG.info("rename succeeded for thread " + threadNumber); successfulRenameCount.incrementAndGet(); } } catch (IOException e) { unexpectedError.compareAndSet(null, e); ContractTestUtils.fail("Exception unexpected", e); } })); } // Start each thread threads.forEach(t -> t.start()); // Wait for threads to start and wait on latch Thread.sleep(2000); // Now start to rename latch.countDown(); // Wait for all threads to complete threads.forEach(t -> { try { t.join(); } catch (InterruptedException e) { } }); if (unexpectedError.get() != null) { throw unexpectedError.get(); } assertEquals(1, successfulRenameCount.get()); LOG.info("Success, only one rename operation succeeded!"); } @Test public void testLazyRenamePendingCanOverwriteExistingFile() throws Exception { final String 
srcFile = "srcFile"; final String dstFile = "dstFile"; Path srcPath = path(srcFile); FSDataOutputStream srcStream = fs.create(srcPath); assertTrue(fs.exists(srcPath)); Path dstPath = path(dstFile); FSDataOutputStream dstStream = fs.create(dstPath); assertTrue(fs.exists(dstPath)); NativeAzureFileSystem nfs = fs; final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath)); final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath)); nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null); assertTrue(fs.exists(dstPath)); assertFalse(fs.exists(srcPath)); IOUtils.cleanupWithLogger(null, srcStream); IOUtils.cleanupWithLogger(null, dstStream); } /** * Tests fs.delete() function to delete a blob when another blob is holding a * lease on it. Delete if called without a lease should fail if another process * is holding a lease and throw appropriate exception * This is a scenario that would happen in HMaster startup when it tries to * clean up the temp dirs while the HMaster process which was killed earlier * held lease on the blob when doing some DDL operation */ @Test public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() throws Exception { LOG.info("Starting test"); // Create the file Path path = methodPath(); fs.create(path); assertPathExists("test file", path); NativeAzureFileSystem nfs = fs; final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path)); final AzureNativeFileSystemStore store = nfs.getStore(); // Acquire the lease on the file in a background thread final CountDownLatch leaseAttemptComplete = new CountDownLatch(1); final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1); SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { // Acquire the lease and then signal the main test thread. 
SelfRenewingLease lease = null; try { lease = store.acquireLease(fullKey); LOG.info("Lease acquired: " + lease.getLeaseID()); } catch (AzureException e) { LOG.warn("Lease acqusition thread unable to acquire lease", e); } finally { leaseAttemptComplete.countDown(); } // Wait for the main test thread to signal it will attempt the delete. try { beginningDeleteAttempt.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } // Keep holding the lease past the lease acquisition retry interval, so // the test covers the case of delete retrying to acquire the lease. try { Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } try { if (lease != null){ LOG.info("Freeing lease"); lease.free(); } } catch (StorageException se) { LOG.warn("Unable to free lease.", se); } } }; // Start the background thread and wait for it to signal the lease is held. t.start(); try { leaseAttemptComplete.await(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } // Try to delete the same file beginningDeleteAttempt.countDown(); store.delete(fullKey); // At this point file SHOULD BE DELETED assertPathDoesNotExist("Leased path", path); } /** * Check that isPageBlobKey works as expected. This assumes that * in the test configuration, the list of supported page blob directories * only includes "pageBlobs". That's why this test is made specific * to this subclass. */ @Test public void testIsPageBlobKey() { AzureNativeFileSystemStore store = fs.getStore(); // Use literal strings so it's easier to understand the tests. // In case the constant changes, we want to know about it so we can update this test. assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs"); // URI prefix for test environment. 
String uriPrefix = "file:///"; // negative tests String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo", "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobsxyz/" }; for (String s : negativeKeys) { assertFalse(store.isPageBlobKey(s)); assertFalse(store.isPageBlobKey(uriPrefix + s)); } // positive tests String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" }; for (String s : positiveKeys) { assertTrue(store.isPageBlobKey(s)); assertTrue(store.isPageBlobKey(uriPrefix + s)); } } /** * Test that isAtomicRenameKey() works as expected. */ @Test public void testIsAtomicRenameKey() { AzureNativeFileSystemStore store = fs.getStore(); // We want to know if the default configuration changes so we can fix // this test. assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES, "/atomicRenameDir1,/atomicRenameDir2"); // URI prefix for test environment. String uriPrefix = "file:///"; // negative tests String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase", "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbasexyz/", "foo/atomicRenameDir1/"}; for (String s : negativeKeys) { assertFalse(store.isAtomicRenameKey(s)); assertFalse(store.isAtomicRenameKey(uriPrefix + s)); } // Positive tests. The directories for atomic rename are /hbase // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES // for this test). String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/", "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"}; for (String s : positiveKeys) { assertTrue(store.isAtomicRenameKey(s)); assertTrue(store.isAtomicRenameKey(uriPrefix + s)); } } /** * Tests fs.mkdir() function to create a target blob while another thread * is holding the lease on the blob. mkdir should not fail since the blob * already exists. * This is a scenario that would happen in HBase distributed log splitting. 
* Multiple threads will try to create and update "recovered.edits" folder * under the same path. */ @Test public void testMkdirOnExistingFolderWithLease() throws Exception { SelfRenewingLease lease; // Create the folder Path path = methodPath(); fs.mkdirs(path); NativeAzureFileSystem nfs = fs; String fullKey = nfs.pathToKey(nfs.makeAbsolute(path)); AzureNativeFileSystemStore store = nfs.getStore(); // Acquire the lease on the folder lease = store.acquireLease(fullKey); assertNotNull(lease.getLeaseID() != null, "lease ID"); // Try to create the same folder store.storeEmptyFolder(fullKey, nfs.createPermissionStatus(FsPermission.getDirDefault())); lease.free(); } }
ITestNativeAzureFileSystemLive
java
spring-projects__spring-boot
documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/howto/traditionaldeployment/war/MyApplication.java
{ "start": 997, "end": 1309 }
class ____ extends SpringBootServletInitializer { @Override protected SpringApplicationBuilder configure(SpringApplicationBuilder application) { return application.sources(MyApplication.class); } public static void main(String[] args) { SpringApplication.run(MyApplication.class, args); } }
MyApplication
java
elastic__elasticsearch
x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownPluginsIT.java
{ "start": 997, "end": 4262 }
class ____ extends ESIntegTestCase { private static final Logger logger = LogManager.getLogger(NodeShutdownPluginsIT.class); public static final AtomicBoolean safe = new AtomicBoolean(true); public static final AtomicReference<Collection<String>> triggeredNodes = new AtomicReference<>(null); @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(ShutdownPlugin.class, TestShutdownAwarePlugin.class); } public void testShutdownAwarePlugin() throws Exception { final String node1 = internalCluster().startNode(); final String node2 = internalCluster().startNode(); final String shutdownNode; final String remainNode; final String node1Id = getNodeId(node1); final String node2Id = getNodeId(node2); if (randomBoolean()) { shutdownNode = node1Id; remainNode = node2Id; } else { shutdownNode = node2Id; remainNode = node1Id; } logger.info("--> node {} will be shut down, {} will remain", shutdownNode, remainNode); // First, mark the plugin as not yet safe safe.set(false); // Mark the node as shutting down client().execute( PutShutdownNodeAction.INSTANCE, new PutShutdownNodeAction.Request( TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, shutdownNode, SingleNodeShutdownMetadata.Type.REMOVE, "removal for testing", null, null, null ) ).get(); GetShutdownStatusAction.Response getResp = client().execute( GetShutdownStatusAction.INSTANCE, new GetShutdownStatusAction.Request(TEST_REQUEST_TIMEOUT, remainNode) ).get(); assertTrue(getResp.getShutdownStatuses().isEmpty()); // The plugin should be in progress getResp = client().execute( GetShutdownStatusAction.INSTANCE, new GetShutdownStatusAction.Request(TEST_REQUEST_TIMEOUT, shutdownNode) ).get(); assertThat( getResp.getShutdownStatuses().get(0).pluginsStatus().getStatus(), equalTo(SingleNodeShutdownMetadata.Status.IN_PROGRESS) ); // Change the plugin to be "done" shutting down safe.set(true); // The plugin should be complete getResp = client().execute( GetShutdownStatusAction.INSTANCE, new 
GetShutdownStatusAction.Request(TEST_REQUEST_TIMEOUT, shutdownNode) ).get(); assertThat(getResp.getShutdownStatuses().get(0).pluginsStatus().getStatus(), equalTo(SingleNodeShutdownMetadata.Status.COMPLETE)); // The shutdown node should be in the triggered list assertThat(triggeredNodes.get(), contains(shutdownNode)); client().execute( DeleteShutdownNodeAction.INSTANCE, new DeleteShutdownNodeAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, shutdownNode) ).get(); // The shutdown node should now not in the triggered list assertThat(triggeredNodes.get(), empty()); } public static
NodeShutdownPluginsIT
java
google__guava
guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java
{ "start": 27529, "end": 27856 }
class ____ extends BaseClassThatFailsToThrow { @Keep public void oneArg(@Nullable CharSequence s) {} } public void testSubclassOverridesTheWrongMethod() { shouldFail(new SubclassOverridesTheWrongMethod()); } @SuppressWarnings("unused") // for NullPointerTester private static
SubclassOverridesTheWrongMethod
java
elastic__elasticsearch
modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java
{ "start": 20948, "end": 24849 }
class ____ extends Plugin implements ActionPlugin { static final String ROUTE = "/_test/chunk_and_fail"; static final String FAIL_AFTER_BYTES_PARAM = "fail_after_bytes"; static String randomRequestUri() { return ROUTE + '?' + FAIL_AFTER_BYTES_PARAM + '=' + between(0, ByteSizeUnit.MB.toIntBytes(2)); } @Override public Collection<RestHandler> getRestHandlers( Settings settings, NamedWriteableRegistry namedWriteableRegistry, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster, Predicate<NodeFeature> clusterSupportsFeature ) { return List.of(new BaseRestHandler() { @Override public String getName() { return ROUTE; } @Override public List<Route> routes() { return List.of(new Route(GET, ROUTE)); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { final var failAfterBytes = request.paramAsInt(FAIL_AFTER_BYTES_PARAM, -1); if (failAfterBytes < 0) { throw new IllegalArgumentException("[" + FAIL_AFTER_BYTES_PARAM + "] must be present and non-negative"); } return channel -> randomExecutor(client.threadPool()).execute( () -> channel.sendResponse(RestResponse.chunked(RestStatus.OK, new ChunkedRestResponseBodyPart() { int bytesRemaining = failAfterBytes; @Override public boolean isPartComplete() { return false; } @Override public boolean isLastPart() { return true; } @Override public void getNextPart(ActionListener<ChunkedRestResponseBodyPart> listener) { fail("no continuations here"); } @Override public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> recycler) throws IOException { assert bytesRemaining >= 0 : "already failed"; if (bytesRemaining == 0) { bytesRemaining = -1; throw new IOException("simulated failure"); } else { final var bytesToSend = between(1, bytesRemaining); bytesRemaining -= bytesToSend; return 
ReleasableBytesReference.wrap(new ZeroBytesReference(bytesToSend)); } } @Override public String getResponseContentTypeString() { return RestResponse.TEXT_CONTENT_TYPE; } }, null)) ); } }); } } /** * Adds an HTTP route that only responds when starting to process a second request, ensuring that there is always at least one in-flight * request in the pipeline which keeps a connection from becoming idle. */ public static
ChunkAndFailPlugin
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/result/internal/ResultSetOutputImpl.java
{ "start": 314, "end": 950 }
class ____<T> implements ResultSetOutput<T> { private final Supplier<List<T>> resultSetSupplier; public ResultSetOutputImpl(List<T> results) { this.resultSetSupplier = () -> results; } public ResultSetOutputImpl(Supplier<List<T>> resultSetSupplier) { this.resultSetSupplier = resultSetSupplier; } @Override public boolean isResultSet() { return true; } @Override public List<T> getResultList() { return resultSetSupplier.get(); } @Override public Object getSingleResult() { final List<?> results = getResultList(); return results == null || results.isEmpty() ? null : results.get( 0 ); } }
ResultSetOutputImpl
java
micronaut-projects__micronaut-core
inject-groovy/src/main/groovy/io/micronaut/ast/groovy/scan/AnnotationClassReader.java
{ "start": 17583, "end": 18204 }
class ____ and type annotations if (ANNOTATIONS && anns != 0) { for (int i = readUnsignedShort(anns), v = anns + 2; i > 0; --i) { v = readAnnotationValues(v + 2, c, true, classVisitor.visitAnnotation(readUTF8(v, c), true)); } } if (ANNOTATIONS && ianns != 0) { for (int i = readUnsignedShort(ianns), v = ianns + 2; i > 0; --i) { v = readAnnotationValues(v + 2, c, true, classVisitor.visitAnnotation(readUTF8(v, c), false)); } } // visits the end of the
annotations
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
{ "start": 1348, "end": 2767 }
class ____ extends ESIntegTestCase { public void testClearIndicesCacheWithBlocks() { createIndex("test"); ensureGreen("test"); NumShards numShards = getNumShards("test"); // Request is not blocked for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", blockSetting); BroadcastResponse clearIndicesCacheResponse = indicesAdmin().prepareClearCache("test") .setFieldDataCache(true) .setQueryCache(true) .setFieldDataCache(true) .get(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { disableIndexBlock("test", blockSetting); } } // Request is blocked for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); assertBlocked(indicesAdmin().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true)); } finally { disableIndexBlock("test", blockSetting); } } } }
ClearIndicesCacheBlocksIT
java
micronaut-projects__micronaut-core
inject/src/main/java/io/micronaut/context/env/DefaultConfigurationPath.java
{ "start": 1231, "end": 17047 }
class ____ implements ConfigurationPath { private final LinkedList<ConfigurationSegment> list = new LinkedList<>(); private String computedPrefix; private boolean hasDynamicSegments = false; private PropertyCatalog propertyCatalog = PropertyCatalog.NORMALIZED; DefaultConfigurationPath() { recomputeState(); } @Override public boolean hasDynamicSegments() { return hasDynamicSegments || kind() == ConfigurationSegment.ConfigurationKind.NAME || kind() == ConfigurationSegment.ConfigurationKind.INDEX; } @Override public ConfigurationPath parent() { int i = list.size(); if (i > 1) { DefaultConfigurationPath configurationPath = new DefaultConfigurationPath(); configurationPath.list.addAll(list.subList(0, i - 1)); configurationPath.hasDynamicSegments = hasDynamicSegments; configurationPath.recomputeState(); return configurationPath; } return null; } @NonNull @Override public ConfigurationPath copy() { DefaultConfigurationPath newPath = new DefaultConfigurationPath(); newPath.list.addAll(this.list); newPath.computedPrefix = computedPrefix; newPath.hasDynamicSegments = hasDynamicSegments; return newPath; } @NonNull @Override public String prefix() { return computedPrefix; } @NonNull @Override public String path() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.path(); } else { return StringUtils.EMPTY_STRING; } } @Override public String primary() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.primary(); } return null; } @Override public boolean isNotEmpty() { return !list.isEmpty(); } @NonNull @Override public String resolveValue(String value) { return value.replace(path(), prefix()); } @Override public Class<?> configurationType() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.type(); } return null; } @Override public String name() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.name(); } return null; } @Override public int index() { 
Iterator<ConfigurationSegment> i = list.descendingIterator(); while (i.hasNext()) { ConfigurationSegment s = i.next(); if (s.kind() == ConfigurationSegment.ConfigurationKind.INDEX) { return s.index(); } } return -1; } @NonNull @Override public PropertyCatalog propertyCatalog() { return propertyCatalog; } @Override public String simpleName() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.simpleName(); } return null; } @Override public void traverseResolvableSegments(@NonNull PropertyResolver propertyResolver, @NonNull Consumer<ConfigurationPath> callback) { if (hasDynamicSegments) { // match a path pattern like foo.*.bar.* Collection<List<String>> variableValues = propertyResolver.getPropertyPathMatches(path()); for (List<String> variables : variableValues) { ConfigurationPath newPath = replaceVariables(variables); traversePath(newPath, propertyResolver, callback); } } else { // simple case just traverse entries traversePath(this, propertyResolver, callback); } } @SuppressWarnings("java:S1301") private ConfigurationPath replaceVariables(List<String> variables) { int varIndex = 0; DefaultConfigurationPath newPath = new DefaultConfigurationPath(); newPath.hasDynamicSegments = true; for (ConfigurationSegment configurationSegment : list) { switch (configurationSegment.kind()) { case NAME, INDEX -> { if (varIndex < variables.size()) { ConfigurationSegment.ConfigurationKind kind = newPath.kind(); switch (kind) { case LIST -> newPath.pushConfigurationSegment(Integer.parseInt(variables.get(varIndex++))); case MAP -> newPath.pushConfigurationSegment(variables.get(varIndex++)); default -> newPath.pushConfigurationSegment(configurationSegment); } } else { newPath.pushConfigurationSegment(configurationSegment); } } default -> newPath.pushConfigurationSegment(configurationSegment); } } return newPath; } private static void traversePath(ConfigurationPath thisPath, PropertyResolver propertyResolver, Consumer<ConfigurationPath> callback) { 
ConfigurationSegment.ConfigurationKind kind = thisPath.kind(); switch (kind) { case MAP -> { Collection<String> entries = propertyResolver.getPropertyEntries(thisPath.prefix(), thisPath.propertyCatalog()); for (String key : entries) { ConfigurationPath newPath = thisPath.copy(); newPath.pushConfigurationSegment(key); callback.accept(newPath); } } case LIST -> { List<?> entries = propertyResolver.getProperty(thisPath.prefix(), List.class, Collections.emptyList()); for (int i = 0; i < entries.size(); i++) { Object o = entries.get(i); if (o != null) { ConfigurationPath newPath = thisPath.copy(); newPath.pushConfigurationSegment(i); callback.accept(newPath); } } } case NAME, INDEX -> { ConfigurationPath parent = thisPath.parent(); if (parent != null) { traversePath(parent, propertyResolver, callback); } } default -> { if (propertyResolver.containsProperties(thisPath.prefix())) { callback.accept(thisPath); } } } } @Override public ConfigurationSegment.@NonNull ConfigurationKind kind() { ConfigurationSegment segment = peekLast(); if (segment != null) { return segment.kind(); } return ConfigurationSegment.ConfigurationKind.ROOT; } @Override public ConfigurationSegment peekLast() { return list.peekLast(); } @Override public boolean isWithin(String prefix) { return prefix != null && prefix.startsWith(path()); } @Override public void pushEachPropertyRoot(@NonNull BeanDefinition<?> beanDefinition) { if (!beanDefinition.getBeanType().equals(configurationType())) { if (kind() != ConfigurationSegment.ConfigurationKind.ROOT) { this.hasDynamicSegments = true; } propertyCatalog = beanDefinition.enumValue(EachProperty.class, "catalog", PropertyCatalog.class).orElse(PropertyCatalog.NORMALIZED); boolean isList = beanDefinition.booleanValue(EachProperty.class, "list").orElse(false); String prefix = beanDefinition.stringValue(ConfigurationReader.class, ConfigurationReader.PREFIX).orElse(null); if (prefix != null) { String currentPath = path(); if (!prefix.startsWith(currentPath)) { 
throw new IllegalStateException("Invalid configuration properties nesting for path [" + prefix + "]. Expected: " + currentPath); } String resolvedPrefix = prefix; if (!currentPath.equals(StringUtils.EMPTY_STRING) && !prefix.equals(currentPath)) { resolvedPrefix = prefix.substring(currentPath.length() + 1); } String property = resolvedPrefix.substring(0, resolvedPrefix.length() - (isList ? 3 : 2)); String primaryName = beanDefinition.stringValue(EachProperty.class, "primary").orElse(null); list.add(new DefaultConfigurationSegment( beanDefinition.getBeanType(), property, prefix, isList ? ConfigurationSegment.ConfigurationKind.LIST : ConfigurationSegment.ConfigurationKind.MAP, null, null, primaryName, -1 )); recomputeState(); } } } @Override public void pushConfigurationReader(@NonNull BeanDefinition<?> beanDefinition) { if (!beanDefinition.getBeanType().equals(configurationType())) { if (kind() != ConfigurationSegment.ConfigurationKind.ROOT) { this.hasDynamicSegments = true; } String prefix = beanDefinition.stringValue(ConfigurationReader.class, ConfigurationReader.PREFIX).orElse(null); if (prefix != null) { String currentPath = path(); if (!prefix.startsWith(currentPath)) { throw new IllegalStateException("Invalid configuration properties nesting for path [" + prefix + "]. 
Expected: " + currentPath); } String p = prefix.substring(currentPath.length() + 1); list.add(new DefaultConfigurationSegment( beanDefinition.getBeanType(), p, prefix, ConfigurationSegment.ConfigurationKind.ROOT, name(), simpleName(), primary(), -1 )); recomputeState(); } } } @Override public void pushConfigurationSegment(@NonNull ConfigurationSegment configurationSegment) { ConfigurationSegment.ConfigurationKind kind = configurationSegment.kind(); switch (kind) { case NAME -> pushConfigurationSegment(configurationSegment.name()); case INDEX -> pushConfigurationSegment(configurationSegment.index()); case ROOT -> list.add(new DefaultConfigurationSegment( configurationSegment.type(), configurationSegment.prefix(), configurationSegment.path(), ConfigurationSegment.ConfigurationKind.ROOT, name(), // inherit name simpleName(), primary(), // inherit name index() // inherit the index )); default -> list.add(configurationSegment); } recomputeState(); } @Override public void pushConfigurationSegment(@NonNull String name) { String primary = primary(); ConfigurationSegment.ConfigurationKind kind = kind(); String p = switch (kind) { case MAP -> path(); case LIST -> throw new IllegalStateException("Illegal @EachProperty nesting encountered. Lists require numerical entries."); default -> throw new IllegalStateException("Illegal @EachProperty nesting, expecting a nested named not another configuration reader or name."); }; String qualifiedName = computeName(name); list.add(new DefaultConfigurationSegment( configurationType(), name, p, ConfigurationSegment.ConfigurationKind.NAME, qualifiedName, name, primary, -1 )); recomputeState(); } @Override public void pushConfigurationSegment(int index) { ConfigurationSegment.ConfigurationKind kind = kind(); String p = switch (kind) { case MAP -> throw new IllegalStateException("Illegal @EachProperty nesting encountered. 
Maps require key entries."); case LIST -> path(); default -> throw new IllegalStateException("Illegal @EachProperty nesting, expecting a nested named not another configuration reader or name."); }; String primary = primary(); String strIndex = String.valueOf(index); String qualifiedName = computeName(strIndex); list.add(new DefaultConfigurationSegment( configurationType(), "["+ strIndex + "]", p, ConfigurationSegment.ConfigurationKind.INDEX, qualifiedName, strIndex, primary, index )); recomputeState(); } private String computeName(String simpleName) { String qualifiedName = null; Iterator<ConfigurationSegment> i = list.descendingIterator(); while (i.hasNext()) { qualifiedName = i.next().name(); if (qualifiedName != null) { break; } } if (qualifiedName != null) { qualifiedName = qualifiedName + "-" + simpleName; } else { qualifiedName = simpleName; } return qualifiedName; } private void recomputeState() { StringBuilder str = new StringBuilder(); Iterator<ConfigurationSegment> i = list.iterator(); ConfigurationSegment previous = null; while (i.hasNext()) { ConfigurationSegment configurationSegment = i.next(); if (configurationSegment.kind() == ConfigurationSegment.ConfigurationKind.INDEX) { str.append('[').append(configurationSegment.index()).append(']'); } else { if (previous != null) { str.append('.'); } str.append(configurationSegment); } previous = configurationSegment; } computedPrefix = str.toString(); } @NonNull @Override public ConfigurationSegment removeLast() { try { return list.removeLast(); } finally { recomputeState(); } } @Override public String toString() { return computedPrefix; } @NonNull @Override public Iterator<ConfigurationSegment> iterator() { return list.iterator(); } record DefaultConfigurationSegment( Class<?> type, String prefix, String path, ConfigurationKind kind, String name, String simpleName, String primary, int index) implements ConfigurationSegment { @Override public int length() { return prefix.length(); } @Override public char 
charAt(int index) { return prefix.charAt(index); } @NonNull @Override public CharSequence subSequence(int start, int end) { return prefix.subSequence(start, end); } @NonNull @Override public String toString() { return prefix; } } }
DefaultConfigurationPath
java
apache__camel
core/camel-api/src/generated/java/org/apache/camel/spi/UriEndpoint.java
{ "start": 1387, "end": 6099 }
interface ____ { /** * The first version this endpoint was added to Apache Camel. */ String firstVersion() default ""; /** * Represents the URI scheme name of this endpoint. * * Multiple scheme names can be defined as a comma separated value. For example to associate <var>http</var> and * <var>https</var> to the same endpoint implementation. * * The order of the scheme names here should be the same order as in {@link #extendsScheme()} so their are paired. * * The schema name must be lowercase, it may contain dashes as well. For example: robot-framework. */ String scheme(); /** * Used when an endpoint is extending another endpoint * * Multiple scheme names can be defined as a comma separated value. For example to associate <var>ftp</var> and * <var>ftps</var> to the same endpoint implementation. The order of the scheme names here should be the same order * as in {@link #scheme()} so their are paired. */ String extendsScheme() default ""; /** * Represent the URI syntax the endpoint must use. * * The syntax follows the patterns such as: * <ul> * <li>scheme:host:port</li> * <li>scheme:host:port/path</li> * <li>scheme:path</li> * <li>scheme:path/path2</li> * </ul> * Where each path maps to the name of the endpoint {@link org.apache.camel.spi.UriPath} option. The query * parameters is implied and should not be included in the syntax. 
* * Some examples: * <ul> * <li>file:directoryName</li> * <li>ftp:host:port/directoryName</li> * <li>jms:destinationType:destinationName</li> * </ul> */ String syntax(); /** * If the endpoint supports specifying username and/or password in the UserInfo part of the URI, then the * alternative syntax can represent this such as: * <ul> * <li>ftp:userName:password@host:port/directoryName</li> * <li>ssh:username:password@host:port</li> * </ul> */ String alternativeSyntax() default ""; /** * The configuration parameter name prefix used on parameter names to separate the endpoint properties from the * consumer properties */ String consumerPrefix() default ""; /** * A human-readable title of this entity, such as the component name of the this endpoint. * * For example: JMS, MQTT, Netty HTTP, SAP NetWeaver */ String title(); /** * To associate this endpoint with category(ies). * <p> * This category is intended for grouping the endpoints, such as <code>Category.CORE</code>, * <code>Category.FILE</code>, <code>Category.DATABASE</code>, etc, but supplied with as array of {@link Category} * enums. * </p> * For example: @UriEndpoint(category = {Category.CORE, Category.DATABASE}) */ Category[] category() default {}; /** * Whether this endpoint can only be used as a producer. * * By default, its assumed the endpoint can be used as both consumer and producer. */ boolean producerOnly() default false; /** * Whether this endpoint can only be used as a consumer. * * By default, its assumed the endpoint can be used as both consumer and producer. */ boolean consumerOnly() default false; /** * Should all properties be known or does the endpoint allow unknown options? * * <code>lenient = false</code> means that the endpoint should validate that all given options is known and * configured properly. <code>lenient = true</code> means that the endpoint allows additional unknown options to be * passed to it but does not throw a ResolveEndpointFailedException when creating the endpoint. 
* * This options is used by a few components for instance the HTTP based that can have dynamic URI options appended * that is targeted for an external system. * * Most endpoints is configured to be <b>not</b> lenient. */ boolean lenientProperties() default false; /** * Generates source code for fast configuring of the endpoint properties which uses direct method invocation of * getter/setters. Setting this to false will fallback to use reflection based introspection as Camel does in Camel * 2.x. */ boolean generateConfigurer() default true; /** * The name of the properties that is used in the endpoint URI to select which API name (method) to use. * * This is only applicable for API based components where configurations are separated by API names (grouping). */ String apiSyntax() default ""; /** * The
UriEndpoint
java
spring-projects__spring-security
web/src/test/java/org/springframework/security/web/context/AbstractSecurityWebApplicationInitializerTests.java
{ "start": 1964, "end": 17989 }
class ____ { private static final EnumSet<DispatcherType> DEFAULT_DISPATCH = EnumSet.of(DispatcherType.REQUEST, DispatcherType.ERROR, DispatcherType.ASYNC, DispatcherType.FORWARD, DispatcherType.INCLUDE); @Test public void onStartupWhenDefaultContextThenRegistersSpringSecurityFilterChain() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { }.onStartup(context); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration).setAsyncSupported(true); verifyNoAddListener(context); } @Test public void onStartupWhenConfigurationClassThenAddsContextLoaderListener() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); new AbstractSecurityWebApplicationInitializer(MyRootConfiguration.class) { }.onStartup(context); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration).setAsyncSupported(true); verify(context).addListener(any(ContextLoaderListener.class)); } @Test public void onStartupWhenEnableHttpSessionEventPublisherIsTrueThenAddsHttpSessionEventPublisher() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = 
ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { @Override protected boolean enableHttpSessionEventPublisher() { return true; } }.onStartup(context); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration).setAsyncSupported(true); verify(context).addListener(HttpSessionEventPublisher.class.getName()); } @Test public void onStartupWhenCustomSecurityDispatcherTypesThenUses() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { @Override protected EnumSet<DispatcherType> getSecurityDispatcherTypes() { return EnumSet.of(DispatcherType.REQUEST, DispatcherType.ERROR, DispatcherType.FORWARD); } }.onStartup(context); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns( EnumSet.of(DispatcherType.REQUEST, DispatcherType.ERROR, DispatcherType.FORWARD), false, "/*"); verify(registration).setAsyncSupported(true); verifyNoAddListener(context); } @Test public void onStartupWhenCustomDispatcherWebApplicationContextSuffixThenUses() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { @Override protected String 
getDispatcherWebApplicationContextSuffix() { return "dispatcher"; } }.onStartup(context); DelegatingFilterProxy proxy = proxyCaptor.getValue(); assertThat(proxy.getContextAttribute()) .isEqualTo("org.springframework.web.servlet.FrameworkServlet.CONTEXT.dispatcher"); assertThat(proxy).hasFieldOrPropertyWithValue("targetBeanName", "springSecurityFilterChain"); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration).setAsyncSupported(true); verifyNoAddListener(context); } @Test public void onStartupWhenSpringSecurityFilterChainAlreadyRegisteredThenException() { ServletContext context = mock(ServletContext.class); assertThatIllegalStateException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { }.onStartup(context)) .withMessage("Duplicate Filter registration for 'springSecurityFilterChain'. " + "Check to ensure the Filter is only configured once."); } @Test public void onStartupWhenInsertFiltersThenInserted() { Filter filter1 = mock(Filter.class); Filter filter2 = mock(Filter.class); ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); given(context.addFilter(anyString(), eq(filter1))).willReturn(registration); given(context.addFilter(anyString(), eq(filter2))).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { insertFilters(context, filter1, filter2); } }.onStartup(context); assertProxyDefaults(proxyCaptor.getValue()); verify(registration, times(3)).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration, times(3)).setAsyncSupported(true); verifyNoAddListener(context); 
verify(context).addFilter(anyString(), eq(filter1)); verify(context).addFilter(anyString(), eq(filter2)); } @Test public void onStartupWhenDuplicateFilterInsertedThenException() { Filter filter1 = mock(Filter.class); ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); assertThatIllegalStateException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { insertFilters(context, filter1); } }.onStartup(context)) .withMessage( "Duplicate Filter registration for 'object'. Check to ensure the Filter is only configured once."); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(context).addFilter(anyString(), eq(filter1)); } @Test public void onStartupWhenInsertFiltersEmptyThenException() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); assertThatIllegalArgumentException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { insertFilters(context); } }.onStartup(context)).withMessage("filters cannot be null or empty"); assertProxyDefaults(proxyCaptor.getValue()); } @Test public void onStartupWhenNullFilterInsertedThenException() { Filter filter = mock(Filter.class); ServletContext context = 
mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); given(context.addFilter(anyString(), eq(filter))).willReturn(registration); assertThatIllegalArgumentException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { insertFilters(context, filter, null); } }.onStartup(context)).withMessageContaining("filters cannot contain null values"); verify(context, times(2)).addFilter(anyString(), any(Filter.class)); } @Test public void onStartupWhenAppendFiltersThenAppended() { Filter filter1 = mock(Filter.class); Filter filter2 = mock(Filter.class); ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); given(context.addFilter(anyString(), eq(filter1))).willReturn(registration); given(context.addFilter(anyString(), eq(filter2))).willReturn(registration); new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { appendFilters(context, filter1, filter2); } }.onStartup(context); verify(registration, times(1)).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(registration, times(2)).addMappingForUrlPatterns(DEFAULT_DISPATCH, true, "/*"); verify(registration, times(3)).setAsyncSupported(true); verifyNoAddListener(context); verify(context, times(3)).addFilter(anyString(), any(Filter.class)); } @Test public void 
onStartupWhenDuplicateFilterAppendedThenException() { Filter filter1 = mock(Filter.class); ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); assertThatIllegalStateException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { appendFilters(context, filter1); } }.onStartup(context)) .withMessage("Duplicate Filter registration for 'object'. " + "Check to ensure the Filter is only configured once."); assertProxyDefaults(proxyCaptor.getValue()); verify(registration).addMappingForUrlPatterns(DEFAULT_DISPATCH, false, "/*"); verify(context).addFilter(anyString(), eq(filter1)); } @Test public void onStartupWhenAppendFiltersEmptyThenException() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); assertThatIllegalArgumentException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { appendFilters(context); } }.onStartup(context)).withMessage("filters cannot be null or empty"); assertProxyDefaults(proxyCaptor.getValue()); } @Test public void onStartupWhenNullFilterAppendedThenException() { Filter filter = mock(Filter.class); ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); 
ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture())).willReturn(registration); given(context.addFilter(anyString(), eq(filter))).willReturn(registration); assertThatIllegalArgumentException().isThrownBy(() -> new AbstractSecurityWebApplicationInitializer() { @Override protected void afterSpringSecurityFilterChain(ServletContext servletContext) { appendFilters(context, filter, null); } }.onStartup(context)).withMessageContaining("filters cannot contain null values"); verify(context, times(2)).addFilter(anyString(), any(Filter.class)); } @Test public void onStartupWhenDefaultsThenSessionTrackingModes() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); given(context.addFilter(eq("springSecurityFilterChain"), any(DelegatingFilterProxy.class))) .willReturn(registration); @SuppressWarnings("unchecked") ArgumentCaptor<Set<SessionTrackingMode>> modesCaptor = ArgumentCaptor.forClass(Set.class); new AbstractSecurityWebApplicationInitializer() { }.onStartup(context); verify(context).addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture()); assertProxyDefaults(proxyCaptor.getValue()); verify(context).setSessionTrackingModes(modesCaptor.capture()); Set<SessionTrackingMode> modes = modesCaptor.getValue(); assertThat(modes).hasSize(1); assertThat(modes).containsExactly(SessionTrackingMode.COOKIE); } @Test public void onStartupWhenSessionTrackingModesConfiguredThenUsed() { ServletContext context = mock(ServletContext.class); FilterRegistration.Dynamic registration = mock(FilterRegistration.Dynamic.class); ArgumentCaptor<DelegatingFilterProxy> proxyCaptor = ArgumentCaptor.forClass(DelegatingFilterProxy.class); 
given(context.addFilter(eq("springSecurityFilterChain"), any(DelegatingFilterProxy.class))) .willReturn(registration); @SuppressWarnings("unchecked") ArgumentCaptor<Set<SessionTrackingMode>> modesCaptor = ArgumentCaptor.forClass(Set.class); willDoNothing().given(context).setSessionTrackingModes(any()); new AbstractSecurityWebApplicationInitializer() { @Override public Set<SessionTrackingMode> getSessionTrackingModes() { return Collections.singleton(SessionTrackingMode.SSL); } }.onStartup(context); verify(context).addFilter(eq("springSecurityFilterChain"), proxyCaptor.capture()); assertProxyDefaults(proxyCaptor.getValue()); verify(context).setSessionTrackingModes(modesCaptor.capture()); Set<SessionTrackingMode> modes = modesCaptor.getValue(); assertThat(modes).hasSize(1); assertThat(modes).containsExactly(SessionTrackingMode.SSL); } @Test public void defaultFilterNameEqualsSpringSecurityFilterChain() { assertThat(AbstractSecurityWebApplicationInitializer.DEFAULT_FILTER_NAME) .isEqualTo("springSecurityFilterChain"); } private static void verifyNoAddListener(ServletContext context) { verify(context, times(0)).addListener(anyString()); verify(context, times(0)).addListener(any(EventListener.class)); verify(context, times(0)).addListener(any(Class.class)); } private static void assertProxyDefaults(DelegatingFilterProxy proxy) { assertThat(proxy.getContextAttribute()).isNull(); assertThat(proxy).hasFieldOrPropertyWithValue("targetBeanName", "springSecurityFilterChain"); } @Configuration static
AbstractSecurityWebApplicationInitializerTests
java
apache__flink
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/LastValueAggFunctionWithoutOrderTest.java
{ "start": 4851, "end": 5344 }
class ____ extends NumberLastValueAggFunctionWithoutOrderTestBase<Double> { @Override protected Double getValue(String v) { return Double.valueOf(v); } @Override protected AggregateFunction<Double, RowData> getAggregator() { return new LastValueAggFunction<>(DataTypes.DOUBLE().getLogicalType()); } } /** Test for {@link BooleanType}. */ @Nested final
DoubleLastValueAggFunctionWithoutOrderTest
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/sql/results/graph/embeddable/internal/NonAggregatedIdentifierMappingInitializer.java
{ "start": 1726, "end": 2850 }
class ____ extends AbstractInitializer<NonAggregatedIdentifierMappingInitializer.NonAggregatedIdentifierMappingInitializerData> implements EmbeddableInitializer<NonAggregatedIdentifierMappingInitializer.NonAggregatedIdentifierMappingInitializerData> { private final NavigablePath navigablePath; private final NonAggregatedIdentifierMapping embedded; private final EmbeddableMappingType virtualIdEmbeddable; private final EmbeddableMappingType representationEmbeddable; private final EmbeddableInstantiator embeddableInstantiator; private final @Nullable InitializerParent<?> parent; private final boolean isResultInitializer; private final DomainResultAssembler<?>[] assemblers; private final @Nullable Initializer<InitializerData>[] initializers; private final @Nullable Initializer<InitializerData>[] subInitializersForResolveFromInitialized; private final @Nullable Initializer<InitializerData>[] collectionContainingSubInitializers; private final boolean lazyCapable; private final boolean hasLazySubInitializer; private final boolean hasIdClass; public static
NonAggregatedIdentifierMappingInitializer
java
dropwizard__dropwizard
dropwizard-servlets/src/test/java/io/dropwizard/servlets/SlowRequestFilterTest.java
{ "start": 589, "end": 2256 }
class ____ { private final HttpServletRequest request = mock(); private final HttpServletResponse response = mock(); private final FilterChain chain = mock(); private final FilterConfig filterConfig = mock(); private final Logger logger = mock(); private final SlowRequestFilter slowRequestFilter = new SlowRequestFilter(Duration.milliseconds(500)); @BeforeEach void setUp() throws Exception { slowRequestFilter.init(filterConfig); slowRequestFilter.setLogger(logger); slowRequestFilter.setCurrentTimeProvider(() -> 1510330244000000L); when(request.getMethod()).thenReturn("GET"); when(request.getRequestURI()).thenReturn("/some/path"); } @AfterEach void tearDown() throws Exception { slowRequestFilter.destroy(); } @Test void logsSlowRequests() throws Exception { doAnswer(invocationOnMock -> { slowRequestFilter.setCurrentTimeProvider(() -> 1510330745000000L); return null; }).when(chain).doFilter(request, response); slowRequestFilter.doFilter(request, response, chain); verify(logger).warn("Slow request: {} {} ({}ms)", "GET", "/some/path", 501L); } @Test void doesNotLogFastRequests() throws Exception { doAnswer(invocationOnMock -> { slowRequestFilter.setCurrentTimeProvider(() -> 1510330743000000L); return null; }).when(chain).doFilter(request, response); slowRequestFilter.doFilter(request, response, chain); verify(logger, never()).warn("Slow request: {} {} ({}ms)", "GET", "/some/path", 499L); } }
SlowRequestFilterTest
java
eclipse-vertx__vert.x
vertx-core/src/main/java/io/vertx/core/json/jackson/JsonArraySerializer.java
{ "start": 682, "end": 912 }
class ____ extends JsonSerializer<JsonArray> { @Override public void serialize(JsonArray value, JsonGenerator jgen, SerializerProvider provider) throws IOException { jgen.writeObject(value.getList()); } }
JsonArraySerializer
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
{ "start": 5385, "end": 6780 }
class ____ extends Mapper<LongWritable, LongWritable, BooleanWritable, LongWritable> { /** Map method. * @param offset samples starting from the (offset+1)th sample. * @param size the number of samples for this map * @param context output {true-&gt;numInside, false-&gt;numOutside} */ public void map(LongWritable offset, LongWritable size, Context context) throws IOException, InterruptedException { final HaltonSequence haltonsequence = new HaltonSequence(offset.get()); long numInside = 0L; long numOutside = 0L; for(long i = 0; i < size.get(); ) { //generate points in a unit square final double[] point = haltonsequence.nextPoint(); //count points inside/outside of the inscribed circle of the square final double x = point[0] - 0.5; final double y = point[1] - 0.5; if (x*x + y*y > 0.25) { numOutside++; } else { numInside++; } //report status i++; if (i % 1000 == 0) { context.setStatus("Generated " + i + " samples."); } } //output map results context.write(new BooleanWritable(true), new LongWritable(numInside)); context.write(new BooleanWritable(false), new LongWritable(numOutside)); } } /** * Reducer
QmcMapper
java
spring-projects__spring-framework
spring-jdbc/src/main/java/org/springframework/jdbc/datasource/embedded/ConnectionProperties.java
{ "start": 1048, "end": 1111 }
interface ____ { /** * Set the JDBC driver
ConnectionProperties
java
apache__maven
compat/maven-compat/src/main/java/org/apache/maven/artifact/ArtifactScopeEnum.java
{ "start": 1030, "end": 3324 }
enum ____ { compile(1), test(2), runtime(3), provided(4), system(5), runtime_plus_system(6); public static final ArtifactScopeEnum DEFAULT_SCOPE = compile; private int id; // Constructor ArtifactScopeEnum(int id) { this.id = id; } int getId() { return id; } /** * Helper method to simplify null processing * * @param scope a scope or {@code null} * @return the provided scope or DEFAULT_SCOPE */ public static ArtifactScopeEnum checkScope(ArtifactScopeEnum scope) { return scope == null ? DEFAULT_SCOPE : scope; } /** * * @return unsafe String representation of this scope. */ public String getScope() { if (id == 1) { return Artifact.SCOPE_COMPILE; } else if (id == 2) { return Artifact.SCOPE_TEST; } else if (id == 3) { return Artifact.SCOPE_RUNTIME; } else if (id == 4) { return Artifact.SCOPE_PROVIDED; } else if (id == 5) { return Artifact.SCOPE_SYSTEM; } else { return Artifact.SCOPE_RUNTIME_PLUS_SYSTEM; } } private static final ArtifactScopeEnum[][][] COMPLIANCY_SETS = { {{compile}, {compile, provided, system}}, {{test}, {compile, test, provided, system}}, {{runtime}, {compile, runtime, system}}, {{provided}, {compile, test, provided}} }; /** * scope relationship function. Used by the graph conflict resolution policies * * @param scope a scope * @return true is supplied scope is an inclusive sub-scope of current one. */ public boolean encloses(ArtifactScopeEnum scope) { final ArtifactScopeEnum s = checkScope(scope); // system scope is historic only - and simple if (id == system.id) { return scope.id == system.id; } for (ArtifactScopeEnum[][] set : COMPLIANCY_SETS) { if (id == set[0][0].id) { for (ArtifactScopeEnum ase : set[1]) { if (s.id == ase.id) { return true; } } break; } } return false; } }
ArtifactScopeEnum
java
apache__camel
components/camel-smpp/src/test/java/org/apache/camel/component/smpp/AbstractSmppCommandTest.java
{ "start": 1368, "end": 3876 }
class ____ { private SMPPSession session = new SMPPSession(); private SmppConfiguration config = new SmppConfiguration(); private AbstractSmppCommand command; @BeforeEach public void setUp() { session = new SMPPSession(); config = new SmppConfiguration(); command = new AbstractSmppCommand(session, config) { @Override public void execute(Exchange exchange) { } }; } @Test public void constructor() { assertSame(session, command.session); assertSame(config, command.config); } @Test public void getResponseMessage() { Exchange inOnlyExchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOnly); Exchange inOutExchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); assertSame(ExchangeHelper.getResultMessage(inOnlyExchange), inOnlyExchange.getIn()); /* NOTE: in this test it's important to call the methods in this order: 1. command.getResponseMessage 2. inOutExchange.getMessage This is so, because the empty out Message object is created by the getOut messaged called by command.getResponseMessage. Calling in the inverse order causes the hasOut check on getMessage() to return false, which, in turns, causes it to return the in message. Thus failing the test. 
*/ Message expectedMessage = ExchangeHelper.getResultMessage(inOutExchange); Message verificationMessage = inOutExchange.getMessage(); assertSame(expectedMessage, verificationMessage); } @Test public void determineTypeClass() throws Exception { assertSame(OptionalParameter.Source_subaddress.class, command.determineTypeClass(Tag.SOURCE_SUBADDRESS)); assertSame(OptionalParameter.Additional_status_info_text.class, command.determineTypeClass(Tag.ADDITIONAL_STATUS_INFO_TEXT)); assertSame(OptionalParameter.Dest_addr_subunit.class, command.determineTypeClass(Tag.DEST_ADDR_SUBUNIT)); assertSame(OptionalParameter.Dest_telematics_id.class, command.determineTypeClass(Tag.DEST_TELEMATICS_ID)); assertSame(OptionalParameter.Qos_time_to_live.class, command.determineTypeClass(Tag.QOS_TIME_TO_LIVE)); assertSame(OptionalParameter.Alert_on_message_delivery.class, command.determineTypeClass(Tag.ALERT_ON_MESSAGE_DELIVERY)); } }
AbstractSmppCommandTest
java
spring-projects__spring-boot
documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/web/exchanges/HttpExchangesEndpointDocumentationTests.java
{ "start": 2317, "end": 5358 }
class ____ extends MockMvcEndpointDocumentationTests { @MockitoBean private HttpExchangeRepository repository; @Test void httpExchanges() { RecordableHttpRequest request = mock(RecordableHttpRequest.class); given(request.getUri()).willReturn(URI.create("https://api.example.com")); given(request.getMethod()).willReturn("GET"); given(request.getHeaders()) .willReturn(Collections.singletonMap(HttpHeaders.ACCEPT, List.of("application/json"))); RecordableHttpResponse response = mock(RecordableHttpResponse.class); given(response.getStatus()).willReturn(200); given(response.getHeaders()) .willReturn(Collections.singletonMap(HttpHeaders.CONTENT_TYPE, List.of("application/json"))); Principal principal = mock(Principal.class); given(principal.getName()).willReturn("alice"); Instant instant = Instant.parse("2022-12-22T13:43:41.00Z"); Clock start = Clock.fixed(instant, ZoneId.systemDefault()); Clock end = Clock.offset(start, Duration.ofMillis(23)); HttpExchange exchange = HttpExchange.start(start, request) .finish(end, response, () -> principal, () -> UUID.randomUUID().toString(), EnumSet.allOf(Include.class)); given(this.repository.findAll()).willReturn(List.of(exchange)); assertThat(this.mvc.get().uri("/actuator/httpexchanges")).hasStatusOk() .apply(document("httpexchanges", responseFields( fieldWithPath("exchanges").description("An array of HTTP request-response exchanges."), fieldWithPath("exchanges.[].timestamp").description("Timestamp of when the exchange occurred."), fieldWithPath("exchanges.[].principal").description("Principal of the exchange, if any.") .optional(), fieldWithPath("exchanges.[].principal.name").description("Name of the principal.").optional(), fieldWithPath("exchanges.[].request.method").description("HTTP method of the request."), fieldWithPath("exchanges.[].request.remoteAddress") .description("Remote address from which the request was received, if known.") .optional() .type(JsonFieldType.STRING), 
fieldWithPath("exchanges.[].request.uri").description("URI of the request."), fieldWithPath("exchanges.[].request.headers") .description("Headers of the request, keyed by header name."), fieldWithPath("exchanges.[].request.headers.*.[]").description("Values of the header"), fieldWithPath("exchanges.[].response.status").description("Status of the response"), fieldWithPath("exchanges.[].response.headers") .description("Headers of the response, keyed by header name."), fieldWithPath("exchanges.[].response.headers.*.[]").description("Values of the header"), fieldWithPath("exchanges.[].session").description("Session associated with the exchange, if any.") .optional(), fieldWithPath("exchanges.[].session.id").description("ID of the session."), fieldWithPath("exchanges.[].timeTaken").description("Time taken to handle the exchange.")))); } @Configuration(proxyBeanMethods = false) static
HttpExchangesEndpointDocumentationTests
java
apache__flink
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ColumnJsonDeserializer.java
{ "start": 3061, "end": 6106 }
class ____ extends StdDeserializer<Column> { private static final String SUPPORTED_KINDS = Arrays.toString(new String[] {KIND_PHYSICAL, KIND_COMPUTED, KIND_METADATA}); ColumnJsonDeserializer() { super(Column.class); } @Override public Column deserialize(JsonParser jsonParser, DeserializationContext ctx) throws IOException { ObjectNode jsonNode = jsonParser.readValueAsTree(); String columnName = jsonNode.required(NAME).asText(); String columnKind = Optional.ofNullable(jsonNode.get(KIND)).map(JsonNode::asText).orElse(KIND_PHYSICAL); Column column; switch (columnKind) { case KIND_PHYSICAL: column = deserializePhysicalColumn(columnName, jsonNode, jsonParser.getCodec(), ctx); break; case KIND_COMPUTED: column = deserializeComputedColumn(columnName, jsonNode, jsonParser.getCodec(), ctx); break; case KIND_METADATA: column = deserializeMetadataColumn(columnName, jsonNode, jsonParser.getCodec(), ctx); break; default: throw new ValidationException( String.format( "Cannot recognize column type '%s'. 
Allowed types: %s.", columnKind, SUPPORTED_KINDS)); } return column.withComment( deserializeOptionalField( jsonNode, COMMENT, String.class, jsonParser.getCodec(), ctx) .orElse(null)); } private static Column.PhysicalColumn deserializePhysicalColumn( String columnName, ObjectNode jsonNode, ObjectCodec codec, DeserializationContext ctx) throws IOException { return Column.physical( columnName, ctx.readValue(traverse(jsonNode.required(DATA_TYPE), codec), DataType.class)); } private static Column.ComputedColumn deserializeComputedColumn( String columnName, ObjectNode jsonNode, ObjectCodec codec, DeserializationContext ctx) throws IOException { return Column.computed( columnName, ctx.readValue( traverse(jsonNode.required(EXPRESSION), codec), ResolvedExpression.class)); } private static Column.MetadataColumn deserializeMetadataColumn( String columnName, ObjectNode jsonNode, ObjectCodec codec, DeserializationContext ctx) throws IOException { return Column.metadata( columnName, ctx.readValue(traverse(jsonNode.required(DATA_TYPE), codec), DataType.class), deserializeOptionalField(jsonNode, METADATA_KEY, String.class, codec, ctx) .orElse(null), jsonNode.required(IS_VIRTUAL).asBoolean()); } }
ColumnJsonDeserializer
java
apache__camel
components/camel-jt400/src/test/java/org/apache/camel/component/jt400/Jt400RouteTest.java
{ "start": 1365, "end": 2583 }
class ____ extends CamelTestSupport { // fill in correct values for all constants to test with a real AS/400 // system private static final String USER = "username"; private static final String PASSWORD = "password"; private static final String SYSTEM = null; private static final String LIBRARY = "library"; private static final String QUEUE = "queue"; @Test public void testBasicTest() throws Exception { if (SYSTEM != null) { MockEndpoint endpoint = getMockEndpoint("mock:a"); endpoint.expectedBodiesReceived("Test message"); sendBody("direct:a", "Test message"); endpoint.assertIsSatisfied(); } } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { if (SYSTEM != null) { String uri = String.format("jt400://%s:%s@%s/QSYS.LIB/%s.LIB/%s.DTAQ", USER, PASSWORD, SYSTEM, LIBRARY, QUEUE); from("direct:a").to(uri); from(uri).to("mock:a"); } } }; } }
Jt400RouteTest
java
alibaba__nacos
persistence/src/test/java/com/alibaba/nacos/persistence/datasource/mock/MockResultSet.java
{ "start": 1271, "end": 22772 }
class ____ implements ResultSet { @Override public boolean next() throws SQLException { return false; } @Override public void close() throws SQLException { } @Override public boolean wasNull() throws SQLException { return false; } @Override public String getString(int columnIndex) throws SQLException { return ""; } @Override public boolean getBoolean(int columnIndex) throws SQLException { return false; } @Override public byte getByte(int columnIndex) throws SQLException { return 0; } @Override public short getShort(int columnIndex) throws SQLException { return 0; } @Override public int getInt(int columnIndex) throws SQLException { return 0; } @Override public long getLong(int columnIndex) throws SQLException { return 0; } @Override public float getFloat(int columnIndex) throws SQLException { return 0; } @Override public double getDouble(int columnIndex) throws SQLException { return 0; } @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { return null; } @Override public byte[] getBytes(int columnIndex) throws SQLException { return new byte[0]; } @Override public Date getDate(int columnIndex) throws SQLException { return null; } @Override public Time getTime(int columnIndex) throws SQLException { return null; } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { return null; } @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { return null; } @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { return null; } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { return null; } @Override public String getString(String columnLabel) throws SQLException { return ""; } @Override public boolean getBoolean(String columnLabel) throws SQLException { return false; } @Override public byte getByte(String columnLabel) throws SQLException { return 0; } @Override public short getShort(String columnLabel) throws SQLException 
{ return 0; } @Override public int getInt(String columnLabel) throws SQLException { return 0; } @Override public long getLong(String columnLabel) throws SQLException { return 0; } @Override public float getFloat(String columnLabel) throws SQLException { return 0; } @Override public double getDouble(String columnLabel) throws SQLException { return 0; } @Override public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { return null; } @Override public byte[] getBytes(String columnLabel) throws SQLException { return new byte[0]; } @Override public Date getDate(String columnLabel) throws SQLException { return null; } @Override public Time getTime(String columnLabel) throws SQLException { return null; } @Override public Timestamp getTimestamp(String columnLabel) throws SQLException { return null; } @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { return null; } @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { return null; } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { return null; } @Override public SQLWarning getWarnings() throws SQLException { return null; } @Override public void clearWarnings() throws SQLException { } @Override public String getCursorName() throws SQLException { return ""; } @Override public ResultSetMetaData getMetaData() throws SQLException { return null; } @Override public Object getObject(int columnIndex) throws SQLException { return null; } @Override public Object getObject(String columnLabel) throws SQLException { return null; } @Override public int findColumn(String columnLabel) throws SQLException { return 0; } @Override public Reader getCharacterStream(int columnIndex) throws SQLException { return null; } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { return null; } @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { return null; } 
@Override public BigDecimal getBigDecimal(String columnLabel) throws SQLException { return null; } @Override public boolean isBeforeFirst() throws SQLException { return false; } @Override public boolean isAfterLast() throws SQLException { return false; } @Override public boolean isFirst() throws SQLException { return false; } @Override public boolean isLast() throws SQLException { return false; } @Override public void beforeFirst() throws SQLException { } @Override public void afterLast() throws SQLException { } @Override public boolean first() throws SQLException { return false; } @Override public boolean last() throws SQLException { return false; } @Override public int getRow() throws SQLException { return 0; } @Override public boolean absolute(int row) throws SQLException { return false; } @Override public boolean relative(int rows) throws SQLException { return false; } @Override public boolean previous() throws SQLException { return false; } @Override public void setFetchDirection(int direction) throws SQLException { } @Override public int getFetchDirection() throws SQLException { return 0; } @Override public void setFetchSize(int rows) throws SQLException { } @Override public int getFetchSize() throws SQLException { return 0; } @Override public int getType() throws SQLException { return 0; } @Override public int getConcurrency() throws SQLException { return 0; } @Override public boolean rowUpdated() throws SQLException { return false; } @Override public boolean rowInserted() throws SQLException { return false; } @Override public boolean rowDeleted() throws SQLException { return false; } @Override public void updateNull(int columnIndex) throws SQLException { } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { } @Override public void updateByte(int columnIndex, byte x) throws SQLException { } @Override public void updateShort(int columnIndex, short x) throws SQLException { } @Override public void updateInt(int columnIndex, int 
x) throws SQLException { } @Override public void updateLong(int columnIndex, long x) throws SQLException { } @Override public void updateFloat(int columnIndex, float x) throws SQLException { } @Override public void updateDouble(int columnIndex, double x) throws SQLException { } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { } @Override public void updateString(int columnIndex, String x) throws SQLException { } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { } @Override public void updateDate(int columnIndex, Date x) throws SQLException { } @Override public void updateTime(int columnIndex, Time x) throws SQLException { } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { } @Override public void updateObject(int columnIndex, Object x) throws SQLException { } @Override public void updateNull(String columnLabel) throws SQLException { } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { } @Override public void updateByte(String columnLabel, byte x) throws SQLException { } @Override public void updateShort(String columnLabel, short x) throws SQLException { } @Override public void updateInt(String columnLabel, int x) throws SQLException { } @Override public void updateLong(String columnLabel, long x) throws SQLException { } @Override public void updateFloat(String columnLabel, float x) throws SQLException { } @Override public void updateDouble(String columnLabel, double x) throws SQLException { } 
@Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { } @Override public void updateString(String columnLabel, String x) throws SQLException { } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { } @Override public void updateDate(String columnLabel, Date x) throws SQLException { } @Override public void updateTime(String columnLabel, Time x) throws SQLException { } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { } @Override public void updateObject(String columnLabel, Object x) throws SQLException { } @Override public void insertRow() throws SQLException { } @Override public void updateRow() throws SQLException { } @Override public void deleteRow() throws SQLException { } @Override public void refreshRow() throws SQLException { } @Override public void cancelRowUpdates() throws SQLException { } @Override public void moveToInsertRow() throws SQLException { } @Override public void moveToCurrentRow() throws SQLException { } @Override public Statement getStatement() throws SQLException { return null; } @Override public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { return null; } @Override public Ref getRef(int columnIndex) throws SQLException { return null; } @Override public Blob getBlob(int columnIndex) throws SQLException { return null; } @Override public Clob getClob(int columnIndex) throws SQLException { return null; } @Override public Array getArray(int columnIndex) 
throws SQLException { return null; } @Override public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException { return null; } @Override public Ref getRef(String columnLabel) throws SQLException { return null; } @Override public Blob getBlob(String columnLabel) throws SQLException { return null; } @Override public Clob getClob(String columnLabel) throws SQLException { return null; } @Override public Array getArray(String columnLabel) throws SQLException { return null; } @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { return null; } @Override public Date getDate(String columnLabel, Calendar cal) throws SQLException { return null; } @Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { return null; } @Override public Time getTime(String columnLabel, Calendar cal) throws SQLException { return null; } @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { return null; } @Override public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { return null; } @Override public URL getURL(int columnIndex) throws SQLException { return null; } @Override public URL getURL(String columnLabel) throws SQLException { return null; } @Override public void updateRef(int columnIndex, Ref x) throws SQLException { } @Override public void updateRef(String columnLabel, Ref x) throws SQLException { } @Override public void updateBlob(int columnIndex, Blob x) throws SQLException { } @Override public void updateBlob(String columnLabel, Blob x) throws SQLException { } @Override public void updateClob(int columnIndex, Clob x) throws SQLException { } @Override public void updateClob(String columnLabel, Clob x) throws SQLException { } @Override public void updateArray(int columnIndex, Array x) throws SQLException { } @Override public void updateArray(String columnLabel, Array x) throws SQLException { } @Override public RowId getRowId(int 
columnIndex) throws SQLException { return null; } @Override public RowId getRowId(String columnLabel) throws SQLException { return null; } @Override public void updateRowId(int columnIndex, RowId x) throws SQLException { } @Override public void updateRowId(String columnLabel, RowId x) throws SQLException { } @Override public int getHoldability() throws SQLException { return 0; } @Override public boolean isClosed() throws SQLException { return false; } @Override public void updateNString(int columnIndex, String nString) throws SQLException { } @Override public void updateNString(String columnLabel, String nString) throws SQLException { } @Override public void updateNClob(int columnIndex, NClob nClob) throws SQLException { } @Override public void updateNClob(String columnLabel, NClob nClob) throws SQLException { } @Override public NClob getNClob(int columnIndex) throws SQLException { return null; } @Override public NClob getNClob(String columnLabel) throws SQLException { return null; } @Override public SQLXML getSQLXML(int columnIndex) throws SQLException { return null; } @Override public SQLXML getSQLXML(String columnLabel) throws SQLException { return null; } @Override public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { } @Override public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { } @Override public String getNString(int columnIndex) throws SQLException { return ""; } @Override public String getNString(String columnLabel) throws SQLException { return ""; } @Override public Reader getNCharacterStream(int columnIndex) throws SQLException { return null; } @Override public Reader getNCharacterStream(String columnLabel) throws SQLException { return null; } @Override public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { } @Override public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { } @Override public void 
updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { } @Override public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { } @Override public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { } @Override public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { } @Override public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { } @Override public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { } @Override public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { } @Override public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { } @Override public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { } @Override public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { } @Override public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { } @Override public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { } @Override public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { } @Override public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { } @Override public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { } @Override public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { } @Override public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { } @Override public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { } @Override public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { } @Override public void 
updateCharacterStream(String columnLabel, Reader reader) throws SQLException { } @Override public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { } @Override public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { } @Override public void updateClob(int columnIndex, Reader reader) throws SQLException { } @Override public void updateClob(String columnLabel, Reader reader) throws SQLException { } @Override public void updateNClob(int columnIndex, Reader reader) throws SQLException { } @Override public void updateNClob(String columnLabel, Reader reader) throws SQLException { } @Override public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { return null; } @Override public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { return null; } @Override public <T> T unwrap(Class<T> iface) throws SQLException { return null; } @Override public boolean isWrapperFor(Class<?> iface) throws SQLException { return false; } }
MockResultSet
java
apache__avro
lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Test.java
{ "start": 65363, "end": 72821 }
class ____ extends org.apache.thrift.scheme.TupleScheme<Test> { @Override public void write(org.apache.thrift.protocol.TProtocol prot, Test struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetBoolField()) { optionals.set(0); } if (struct.isSetByteField()) { optionals.set(1); } if (struct.isSetByteOptionalField()) { optionals.set(2); } if (struct.isSetI16Field()) { optionals.set(3); } if (struct.isSetI16OptionalField()) { optionals.set(4); } if (struct.isSetI32Field()) { optionals.set(5); } if (struct.isSetI64Field()) { optionals.set(6); } if (struct.isSetDoubleField()) { optionals.set(7); } if (struct.isSetStringField()) { optionals.set(8); } if (struct.isSetBinaryField()) { optionals.set(9); } if (struct.isSetMapField()) { optionals.set(10); } if (struct.isSetListField()) { optionals.set(11); } if (struct.isSetSetField()) { optionals.set(12); } if (struct.isSetEnumField()) { optionals.set(13); } if (struct.isSetStructField()) { optionals.set(14); } if (struct.isSetFooOrBar()) { optionals.set(15); } oprot.writeBitSet(optionals, 16); if (struct.isSetBoolField()) { oprot.writeBool(struct.boolField); } if (struct.isSetByteField()) { oprot.writeByte(struct.byteField); } if (struct.isSetByteOptionalField()) { oprot.writeByte(struct.byteOptionalField); } if (struct.isSetI16Field()) { oprot.writeI16(struct.i16Field); } if (struct.isSetI16OptionalField()) { oprot.writeI16(struct.i16OptionalField); } if (struct.isSetI32Field()) { oprot.writeI32(struct.i32Field); } if (struct.isSetI64Field()) { oprot.writeI64(struct.i64Field); } if (struct.isSetDoubleField()) { oprot.writeDouble(struct.doubleField); } if (struct.isSetStringField()) { oprot.writeString(struct.stringField); } if (struct.isSetBinaryField()) { oprot.writeBinary(struct.binaryField); } if (struct.isSetMapField()) { { 
oprot.writeI32(struct.mapField.size()); for (java.util.Map.Entry<java.lang.String, java.lang.Integer> _iter13 : struct.mapField.entrySet()) { oprot.writeString(_iter13.getKey()); oprot.writeI32(_iter13.getValue()); } } } if (struct.isSetListField()) { { oprot.writeI32(struct.listField.size()); for (int _iter14 : struct.listField) { oprot.writeI32(_iter14); } } } if (struct.isSetSetField()) { { oprot.writeI32(struct.setField.size()); for (int _iter15 : struct.setField) { oprot.writeI32(_iter15); } } } if (struct.isSetEnumField()) { oprot.writeI32(struct.enumField.getValue()); } if (struct.isSetStructField()) { struct.structField.write(oprot); } if (struct.isSetFooOrBar()) { struct.fooOrBar.write(oprot); } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Test struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(16); if (incoming.get(0)) { struct.boolField = iprot.readBool(); struct.setBoolFieldIsSet(true); } if (incoming.get(1)) { struct.byteField = iprot.readByte(); struct.setByteFieldIsSet(true); } if (incoming.get(2)) { struct.byteOptionalField = iprot.readByte(); struct.setByteOptionalFieldIsSet(true); } if (incoming.get(3)) { struct.i16Field = iprot.readI16(); struct.setI16FieldIsSet(true); } if (incoming.get(4)) { struct.i16OptionalField = iprot.readI16(); struct.setI16OptionalFieldIsSet(true); } if (incoming.get(5)) { struct.i32Field = iprot.readI32(); struct.setI32FieldIsSet(true); } if (incoming.get(6)) { struct.i64Field = iprot.readI64(); struct.setI64FieldIsSet(true); } if (incoming.get(7)) { struct.doubleField = iprot.readDouble(); struct.setDoubleFieldIsSet(true); } if (incoming.get(8)) { struct.stringField = iprot.readString(); struct.setStringFieldIsSet(true); } if (incoming.get(9)) { struct.binaryField = iprot.readBinary(); struct.setBinaryFieldIsSet(true); } if (incoming.get(10)) { { 
org.apache.thrift.protocol.TMap _map16 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32); struct.mapField = new java.util.HashMap<java.lang.String, java.lang.Integer>(2 * _map16.size); @org.apache.thrift.annotation.Nullable java.lang.String _key17; int _val18; for (int _i19 = 0; _i19 < _map16.size; ++_i19) { _key17 = iprot.readString(); _val18 = iprot.readI32(); struct.mapField.put(_key17, _val18); } } struct.setMapFieldIsSet(true); } if (incoming.get(11)) { { org.apache.thrift.protocol.TList _list20 = iprot.readListBegin(org.apache.thrift.protocol.TType.I32); struct.listField = new java.util.ArrayList<java.lang.Integer>(_list20.size); int _elem21; for (int _i22 = 0; _i22 < _list20.size; ++_i22) { _elem21 = iprot.readI32(); struct.listField.add(_elem21); } } struct.setListFieldIsSet(true); } if (incoming.get(12)) { { org.apache.thrift.protocol.TSet _set23 = iprot.readSetBegin(org.apache.thrift.protocol.TType.I32); struct.setField = new java.util.HashSet<java.lang.Integer>(2 * _set23.size); int _elem24; for (int _i25 = 0; _i25 < _set23.size; ++_i25) { _elem24 = iprot.readI32(); struct.setField.add(_elem24); } } struct.setSetFieldIsSet(true); } if (incoming.get(13)) { struct.enumField = org.apache.avro.thrift.test.E.findByValue(iprot.readI32()); struct.setEnumFieldIsSet(true); } if (incoming.get(14)) { struct.structField = new Nested(); struct.structField.read(iprot); struct.setStructFieldIsSet(true); } if (incoming.get(15)) { struct.fooOrBar = new FooOrBar(); struct.fooOrBar.read(iprot); struct.setFooOrBarIsSet(true); } } } private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) { return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); } }
TestTupleScheme
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java
{ "start": 6503, "end": 7441 }
class ____ extends Task.TaskReporter { private int recordNum = 0; // number of records processed TestTaskReporter(Task task) { task.super(task.getProgress(), fakeUmbilical); } @Override public void setProgress(float progress) { super.setProgress(progress); float mapTaskProgress = map.getProgress().getProgress(); LOG.info("Map task progress is " + mapTaskProgress); if (recordNum < 3) { // only 3 records are there; Ignore validating progress after 3 times recordNum++; } else { return; } // validate map task progress when the map task is in map phase assertTrue(Math.abs(mapTaskProgress - ((float)recordNum/3)) < 0.001, "Map progress is not the expected value."); } } /** * Map Task that overrides run method and uses TestTaskReporter instead of * TaskReporter and uses FakeUmbilical. */
TestTaskReporter
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/Schema.java
{ "start": 11131, "end": 11299 }
enum ____ fixed, add an alias. */ public void addAlias(String alias) { throw new AvroRuntimeException("Not a named type: " + this); } /** If this is a record,
or
java
google__dagger
dagger-compiler/main/java/dagger/internal/codegen/xprocessing/XTypeElements.java
{ "start": 2373, "end": 8800 }
interface ____ handle both methods and types. /** Returns the type arguments for the given type as a list of {@link TypeVariableName}. */ public static ImmutableList<XTypeName> typeVariableNames(XTypeElement typeElement) { return typeElement.getTypeParameters().stream() .map(XTypeParameterElement::asTypeVariableName) .collect(toImmutableList()); } /** Returns {@code true} if the given element is nested. */ public static boolean isNested(XTypeElement typeElement) { return typeElement.getEnclosingTypeElement() != null; } /** Returns {@code true} if the given {@code type} has type parameters. */ public static boolean hasTypeParameters(XTypeElement typeElement) { return !typeElement.getTypeParameters().isEmpty(); } /** Returns all non-private, non-static, abstract methods in {@code type}. */ public static ImmutableList<XMethodElement> getAllUnimplementedMethods(XTypeElement type) { return getAllNonPrivateInstanceMethods(type).stream() .filter(XHasModifiers::isAbstract) .collect(toImmutableList()); } /** Returns all non-private, non-static methods in {@code type}. */ public static ImmutableList<XMethodElement> getAllNonPrivateInstanceMethods(XTypeElement type) { return getAllMethods(type).stream() .filter(method -> !method.isPrivate() && !method.isStatic()) .collect(toImmutableList()); } // TODO(bcorso): rename this to getAllMethodsWithoutPrivate, since the private method declared // within this element is being filtered out. This doesn't mirror {@code // MoreElements#getAllMethods}'s behavior but have the same name, and can cause confusion to // developers. 
public static ImmutableList<XMethodElement> getAllMethods(XTypeElement type) { return asStream(type.getAllMethods()) .filter(method -> isAccessibleFrom(method, type)) .collect(toImmutableList()); } public static ImmutableList<XMethodElement> getAllMethodsIncludingPrivate(XTypeElement type) { return asStream(type.getAllMethods()).collect(toImmutableList()); } private static boolean isAccessibleFrom(XMethodElement method, XTypeElement type) { if (method.isPublic() || method.isProtected()) { return true; } if (method.isPrivate()) { return false; } return method .getClosestMemberContainer() .getClassName() .packageName() .equals(type.getClassName().packageName()); } public static boolean isEffectivelyPublic(XTypeElement element) { return allVisibilities(element).stream() .allMatch(visibility -> visibility.equals(Visibility.PUBLIC)); } public static boolean isEffectivelyPrivate(XTypeElement element) { return allVisibilities(element).contains(Visibility.PRIVATE); } public static boolean isJvmClass(XTypeElement element) { return element.isClass() || element.isKotlinObject() || element.isCompanionObject(); } /** * Returns a list of visibilities containing visibility of the given element and the visibility of * its enclosing elements. */ private static ImmutableSet<Visibility> allVisibilities(XTypeElement element) { checkNotNull(element); ImmutableSet.Builder<Visibility> visibilities = ImmutableSet.builder(); XTypeElement currentElement = element; while (currentElement != null) { visibilities.add(Visibility.of(currentElement)); currentElement = currentElement.getEnclosingTypeElement(); } return visibilities.build(); } /** * Returns a string representation of {@link XTypeElement} that is independent of the backend * (javac/ksp). * * <p>This method is similar to {@link XElements#toStableString(XElement)} and * {@link XTypes#toStableString(XType)}, but this string representation includes the type variables and * their bounds, e.g. {@code Foo<T extends Comparable<T>>}. 
This is useful for error messages that * need to reference the type variable bounds. */ public static String toStableString(XTypeElement typeElement) { try { return toStableString(typeElement.getType().getTypeName(), new HashSet<>(), /* depth= */ 0); } catch (TypeNotPresentException e) { return e.typeName(); } } private static String toStableString(TypeName typeName, Set<TypeName> visited, int depth) { if (typeName instanceof ClassName) { return ((ClassName) typeName).canonicalName(); } else if (typeName instanceof ArrayTypeName) { return String.format( "%s[]", toStableString(((ArrayTypeName) typeName).componentType, visited, depth + 1)); } else if (typeName instanceof ParameterizedTypeName) { ParameterizedTypeName parameterizedTypeName = (ParameterizedTypeName) typeName; return String.format( "%s<%s>", parameterizedTypeName.rawType, parameterizedTypeName.typeArguments.stream() .map(typeArgument -> toStableString(typeArgument, visited, depth + 1)) // We purposely don't use a space after the comma to for backwards compatibility with // usages that depended on the previous TypeMirror#toString() implementation. .collect(joining(","))); } else if (typeName instanceof WildcardTypeName) { WildcardTypeName wildcardTypeName = (WildcardTypeName) typeName; // Wildcard types have exactly 1 upper bound. TypeName upperBound = getOnlyElement(wildcardTypeName.upperBounds); if (!upperBound.equals(TypeName.OBJECT)) { // Wildcards with non-Object upper bounds can't have lower bounds. checkState(wildcardTypeName.lowerBounds.isEmpty()); return String.format("? extends %s", toStableString(upperBound, visited, depth + 1)); } if (!wildcardTypeName.lowerBounds.isEmpty()) { // Wildcard types can have at most 1 lower bound. TypeName lowerBound = getOnlyElement(wildcardTypeName.lowerBounds); return String.format("? super %s", toStableString(lowerBound, visited, depth + 1)); } // If the upper bound is Object and there is no lower bound then just use "?". 
return "?"; } else if (typeName instanceof TypeVariableName) { // The idea here is that for an XTypeElement with type variables, we only want to include the // bounds in the definition, i.e. at depth == 1, and not every time the type variable is // referenced. For example, for `
to
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest3.java
{ "start": 1025, "end": 2381 }
class ____ extends MysqlTest { public void test_0() throws Exception { String sql = "CREATE TABLE tk (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY KEY(col3) PARTITIONS 4;"; MySqlStatementParser parser = new MySqlStatementParser(sql); List<SQLStatement> statementList = parser.parseStatementList(); SQLStatement statemen = statementList.get(0); // print(statementList); assertEquals(1, statementList.size()); MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); statemen.accept(visitor); // System.out.println("Tables : " + visitor.getTables()); // System.out.println("fields : " + visitor.getColumns()); // System.out.println("coditions : " + visitor.getConditions()); // System.out.println("orderBy : " + visitor.getOrderByColumns()); assertEquals(1, visitor.getTables().size()); assertEquals(3, visitor.getColumns().size()); assertEquals(0, visitor.getConditions().size()); assertTrue(visitor.getTables().containsKey(new TableStat.Name("tk"))); assertTrue(visitor.getColumns().contains(new Column("tk", "col1"))); assertTrue(visitor.getColumns().contains(new Column("tk", "col2"))); assertTrue(visitor.getColumns().contains(new Column("tk", "col3"))); } }
MySqlCreateTableTest3
java
apache__flink
flink-datastream/src/main/java/org/apache/flink/streaming/runtime/translators/DataStreamV2SinkTransformationTranslator.java
{ "start": 3305, "end": 5874 }
class ____<Input, Output> implements TransformationTranslator<Output, DataStreamV2SinkTransformation<Input, Output>> { private static final String COMMITTER_NAME = "Committer"; private static final String WRITER_NAME = "Writer"; @Override public Collection<Integer> translateForBatch( DataStreamV2SinkTransformation<Input, Output> transformation, Context context) { return translateInternal(transformation, context, true); } @Override public Collection<Integer> translateForStreaming( DataStreamV2SinkTransformation<Input, Output> transformation, Context context) { return translateInternal(transformation, context, false); } private Collection<Integer> translateInternal( DataStreamV2SinkTransformation<Input, Output> transformation, Context context, boolean batch) { SinkExpander<Input> expander = new SinkExpander<>( transformation.getInputStream(), transformation.getSink(), transformation, context, batch); expander.expand(); return Collections.emptyList(); } @SuppressWarnings("rawtypes,unchecked") public static void registerSinkTransformationTranslator() throws Exception { final Field translatorMapField = StreamGraphGenerator.class.getDeclaredField("translatorMap"); translatorMapField.setAccessible(true); final Map<Class<? extends Transformation>, TransformationTranslator<?, ?>> translatorMap = (Map<Class<? extends Transformation>, TransformationTranslator<?, ?>>) translatorMapField.get(null); final Field underlyingMapField = translatorMap.getClass().getDeclaredField("m"); underlyingMapField.setAccessible(true); final Map<Class<? extends Transformation>, TransformationTranslator<?, ?>> underlyingMap = (Map<Class<? extends Transformation>, TransformationTranslator<?, ?>>) underlyingMapField.get(translatorMap); underlyingMap.put( DataStreamV2SinkTransformation.class, new DataStreamV2SinkTransformationTranslator<>()); } /** * Expands the Sink to a sub-topology. Currently, user-defined topologies are not supported. 
* That is, sub-topologies will contain only committers and writers. */ private static
DataStreamV2SinkTransformationTranslator
java
elastic__elasticsearch
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java
{ "start": 2072, "end": 7421 }
class ____ extends UnaryPlan implements NamedWriteable, SurrogateLogicalPlan, TelemetryAware, SortAgnostic, PostAnalysisPlanVerificationAware { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( LogicalPlan.class, "InlineStats", InlineStats::new ); private final Aggregate aggregate; private List<Attribute> lazyOutput; public InlineStats(Source source, Aggregate aggregate) { super(source, aggregate); this.aggregate = aggregate; } public InlineStats(StreamInput in) throws IOException { this(Source.readFrom((PlanStreamInput) in), (Aggregate) in.readNamedWriteable(LogicalPlan.class)); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeNamedWriteable(aggregate); } @Override public String getWriteableName() { return ENTRY.name; } @Override protected NodeInfo<InlineStats> info() { return NodeInfo.create(this, InlineStats::new, aggregate); } @Override public InlineStats replaceChild(LogicalPlan newChild) { return new InlineStats(source(), (Aggregate) newChild); } public Aggregate aggregate() { return aggregate; } @Override public boolean expressionsResolved() { return aggregate.expressionsResolved(); } @Override public List<Attribute> output() { if (this.lazyOutput == null) { this.lazyOutput = mergeOutputAttributes(aggregate.output(), aggregate.child().output()); } return lazyOutput; } // TODO: in case of INLINE STATS, the join key is always the grouping private JoinConfig joinConfig() { List<Expression> groupings = aggregate.groupings(); List<Attribute> namedGroupings = new ArrayList<>(groupings.size()); for (Expression g : groupings) { namedGroupings.add(Expressions.attribute(g)); } // last named grouping wins, just like it happens for regular STATS // ie BY x = field_1, x = field_2, the grouping is actually performed on second x (field_2) namedGroupings = mergeOutputAttributes(namedGroupings, emptyList()); List<Attribute> leftFields = new ArrayList<>(groupings.size()); 
List<Attribute> rightFields = new ArrayList<>(groupings.size()); List<Attribute> rhsOutput = Join.makeReference(aggregate.output()); for (Attribute lhs : namedGroupings) { for (Attribute rhs : rhsOutput) { if (lhs.name().equals(rhs.name())) { leftFields.add(lhs); rightFields.add(rhs); break; } } } return new JoinConfig(JoinTypes.LEFT, leftFields, rightFields, null); } @Override public LogicalPlan surrogate() { // left join between the main relation and the local, lookup relation Source source = source(); LogicalPlan left = aggregate.child(); return new InlineJoin(source, left, InlineJoin.stubSource(aggregate, left), joinConfig()); } @Override public String telemetryLabel() { return "INLINE STATS"; } @Override public int hashCode() { return Objects.hash(aggregate, child()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } InlineStats other = (InlineStats) obj; return Objects.equals(aggregate, other.aggregate); } @Override public BiConsumer<LogicalPlan, Failures> postAnalysisPlanVerification() { return (p, failures) -> { // Allow inline stats to be used with TS command if it follows a STATS command // Examples: // valid: TS metrics | STATS ... // valid: TS metrics | STATS ... | INLINE STATS ... // invalid: TS metrics | INLINE STATS ... // invalid: TS metrics | INLINE STATS ... | STATS ... 
if (p instanceof InlineStats inlineStats) { Holder<Boolean> foundInlineStats = new Holder<>(false); Holder<Boolean> foundPreviousStats = new Holder<>(false); Holder<Boolean> isTimeSeries = new Holder<>(false); inlineStats.child().forEachUp(lp -> { if (lp instanceof Aggregate) { if (foundInlineStats.get() == false) { foundInlineStats.set(true); } else { foundPreviousStats.set(true); } } else if (lp instanceof EsRelation er && er.indexMode() == IndexMode.TIME_SERIES) { isTimeSeries.set(true); } }); if (isTimeSeries.get() && foundPreviousStats.get() == false) { failures.add( fail(inlineStats, "INLINE STATS [{}] can only be used after STATS when used with TS command", this.sourceText()) ); } } }; } }
InlineStats
java
elastic__elasticsearch
client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java
{ "start": 1068, "end": 1343 }
class ____ extends HttpEntityEnclosingRequestBase { static final String METHOD_NAME = HttpGet.METHOD_NAME; HttpGetWithEntity(final URI uri) { setURI(uri); } @Override public String getMethod() { return METHOD_NAME; } }
HttpGetWithEntity
java
apache__camel
components/camel-hazelcast/src/generated/java/org/apache/camel/component/hazelcast/atomicnumber/HazelcastAtomicnumberEndpointConfigurer.java
{ "start": 749, "end": 3672 }
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter { @Override public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) { HazelcastAtomicnumberEndpoint target = (HazelcastAtomicnumberEndpoint) obj; switch (ignoreCase ? name.toLowerCase() : name) { case "defaultoperation": case "defaultOperation": target.setDefaultOperation(property(camelContext, org.apache.camel.component.hazelcast.HazelcastOperation.class, value)); return true; case "hazelcastconfiguri": case "hazelcastConfigUri": target.setHazelcastConfigUri(property(camelContext, java.lang.String.class, value)); return true; case "hazelcastinstance": case "hazelcastInstance": target.setHazelcastInstance(property(camelContext, com.hazelcast.core.HazelcastInstance.class, value)); return true; case "hazelcastinstancename": case "hazelcastInstanceName": target.setHazelcastInstanceName(property(camelContext, java.lang.String.class, value)); return true; case "lazystartproducer": case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true; default: return false; } } @Override public Class<?> getOptionType(String name, boolean ignoreCase) { switch (ignoreCase ? 
name.toLowerCase() : name) { case "defaultoperation": case "defaultOperation": return org.apache.camel.component.hazelcast.HazelcastOperation.class; case "hazelcastconfiguri": case "hazelcastConfigUri": return java.lang.String.class; case "hazelcastinstance": case "hazelcastInstance": return com.hazelcast.core.HazelcastInstance.class; case "hazelcastinstancename": case "hazelcastInstanceName": return java.lang.String.class; case "lazystartproducer": case "lazyStartProducer": return boolean.class; default: return null; } } @Override public Object getOptionValue(Object obj, String name, boolean ignoreCase) { HazelcastAtomicnumberEndpoint target = (HazelcastAtomicnumberEndpoint) obj; switch (ignoreCase ? name.toLowerCase() : name) { case "defaultoperation": case "defaultOperation": return target.getDefaultOperation(); case "hazelcastconfiguri": case "hazelcastConfigUri": return target.getHazelcastConfigUri(); case "hazelcastinstance": case "hazelcastInstance": return target.getHazelcastInstance(); case "hazelcastinstancename": case "hazelcastInstanceName": return target.getHazelcastInstanceName(); case "lazystartproducer": case "lazyStartProducer": return target.isLazyStartProducer(); default: return null; } } }
HazelcastAtomicnumberEndpointConfigurer
java
apache__maven
impl/maven-core/src/main/java/org/apache/maven/lifecycle/internal/LifecycleDependencyResolver.java
{ "start": 2548, "end": 15100 }
class ____ { private final Logger logger = LoggerFactory.getLogger(getClass()); private final ProjectDependenciesResolver dependenciesResolver; private final ProjectArtifactFactory artifactFactory; private final EventSpyDispatcher eventSpyDispatcher; private final ProjectArtifactsCache projectArtifactsCache; private final MessageBuilderFactory messageBuilderFactory; @Inject public LifecycleDependencyResolver( ProjectDependenciesResolver dependenciesResolver, ProjectArtifactFactory artifactFactory, EventSpyDispatcher eventSpyDispatcher, ProjectArtifactsCache projectArtifactsCache, MessageBuilderFactory messageBuilderFactory) { this.dependenciesResolver = dependenciesResolver; this.artifactFactory = artifactFactory; this.eventSpyDispatcher = eventSpyDispatcher; this.projectArtifactsCache = projectArtifactsCache; this.messageBuilderFactory = messageBuilderFactory; } public static List<MavenProject> getProjects(MavenProject project, MavenSession session, boolean aggregator) { if (aggregator && project.getCollectedProjects() != null) { // get the unsorted list of wanted projects Set<MavenProject> projectAndSubmodules = new HashSet<>(project.getCollectedProjects()); projectAndSubmodules.add(project); return session.getProjects().stream() // sorted all .filter(projectAndSubmodules::contains) .collect(Collectors.toList()); // sorted and filtered to what we need } else { return Collections.singletonList(project); } } public void resolveProjectDependencies( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts) throws LifecycleExecutionException { ClassLoader tccl = Thread.currentThread().getContextClassLoader(); try { ClassLoader projectRealm = project.getClassRealm(); if (projectRealm != null && projectRealm != tccl) { Thread.currentThread().setContextClassLoader(projectRealm); } if (project.getDependencyArtifacts() == null) { try { 
project.setDependencyArtifacts(artifactFactory.createArtifacts(project)); } catch (InvalidDependencyVersionException e) { throw new LifecycleExecutionException(e); } } Set<Artifact> resolvedArtifacts = resolveProjectArtifacts( project, scopesToCollect, scopesToResolve, session, aggregating, projectArtifacts); Map<Artifact, File> reactorProjects = new HashMap<>(session.getProjects().size()); for (MavenProject reactorProject : session.getProjects()) { reactorProjects.put( reactorProject.getArtifact(), reactorProject.getArtifact().getFile()); } Map<String, Artifact> map = new HashMap<>(); for (Artifact artifact : resolvedArtifacts) { /** * MNG-6300: resolvedArtifacts can be cache result; this ensures reactor files are always up-to-date * During lifecycle the Artifact.getFile() can change from target/classes to the actual jar. * This clearly shows that target/classes should not be abused as artifactFile just for the classpath */ File reactorProjectFile = reactorProjects.get(artifact); if (reactorProjectFile != null) { artifact.setFile(reactorProjectFile); } map.put(artifact.getDependencyConflictId(), artifact); } project.setResolvedArtifacts(resolvedArtifacts); for (Artifact artifact : project.getDependencyArtifacts()) { if (artifact.getFile() == null) { Artifact resolved = map.get(artifact.getDependencyConflictId()); if (resolved != null) { artifact.setFile(resolved.getFile()); artifact.setDependencyTrail(resolved.getDependencyTrail()); artifact.setResolvedVersion(resolved.getVersion()); artifact.setResolved(true); } } } } finally { Thread.currentThread().setContextClassLoader(tccl); } } public DependencyResolutionResult getProjectDependencyResolutionResult( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts) throws LifecycleExecutionException { Set<Artifact> resolvedArtifacts = resolveProjectArtifacts( project, scopesToCollect, scopesToResolve, 
session, aggregating, projectArtifacts); if (resolvedArtifacts instanceof ProjectArtifactsCache.ArtifactsSetWithResult artifactsSetWithResult) { return artifactsSetWithResult.getResult(); } else { throw new IllegalStateException(); } } public Set<Artifact> resolveProjectArtifacts( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts) throws LifecycleExecutionException { ProjectArtifactsCache.Key cacheKey = projectArtifactsCache.createKey( project, scopesToCollect, scopesToResolve, aggregating, session.getRepositorySession()); ProjectArtifactsCache.CacheRecord recordArtifacts; recordArtifacts = projectArtifactsCache.get(cacheKey); if (recordArtifacts == null) { synchronized (cacheKey) { recordArtifacts = projectArtifactsCache.get(cacheKey); if (recordArtifacts == null) { try { Set<Artifact> resolvedArtifacts = getDependencies( project, scopesToCollect, scopesToResolve, session, aggregating, projectArtifacts); recordArtifacts = projectArtifactsCache.put(cacheKey, resolvedArtifacts); } catch (LifecycleExecutionException e) { projectArtifactsCache.put(cacheKey, e); projectArtifactsCache.register(project, cacheKey, recordArtifacts); throw e; } } } } projectArtifactsCache.register(project, cacheKey, recordArtifacts); return recordArtifacts.getArtifacts(); } private Set<Artifact> getDependencies( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts) throws LifecycleExecutionException { if (scopesToCollect == null) { scopesToCollect = Collections.emptySet(); } if (scopesToResolve == null) { scopesToResolve = Collections.emptySet(); } if (scopesToCollect.isEmpty() && scopesToResolve.isEmpty()) { return new SetWithResolutionResult(null, new LinkedHashSet<>()); } scopesToCollect = new HashSet<>(scopesToCollect); 
scopesToCollect.addAll(scopesToResolve); DependencyFilter collectionFilter = new ScopeDependencyFilter(null, negate(scopesToCollect)); DependencyFilter resolutionFilter = new ScopeDependencyFilter(null, negate(scopesToResolve)); resolutionFilter = AndDependencyFilter.newInstance(collectionFilter, resolutionFilter); resolutionFilter = AndDependencyFilter.newInstance(resolutionFilter, new ReactorDependencyFilter(projectArtifacts)); DependencyResolutionResult result; try { DefaultDependencyResolutionRequest request = new DefaultDependencyResolutionRequest(project, session.getRepositorySession()); request.setResolutionFilter(resolutionFilter); eventSpyDispatcher.onEvent(request); result = dependenciesResolver.resolve(request); } catch (DependencyResolutionException e) { result = e.getResult(); /* * MNG-2277, the check below compensates for our bad plugin support where we ended up with aggregator * plugins that require dependency resolution, although they usually run in phases of the build where project * artifacts haven't been assembled yet. The prime example of this is "mvn release:prepare". 
*/ if (aggregating && areAllDependenciesInReactor(session.getProjects(), result.getUnresolvedDependencies())) { logger.warn("The following dependencies could not be resolved at this point of the build" + " but seem to be part of the reactor:"); for (Dependency dependency : result.getUnresolvedDependencies()) { logger.warn("o {}", dependency); } logger.warn("Try running the build up to the lifecycle phase \"package\""); } else { throw new LifecycleExecutionException(messageBuilderFactory, null, project, e); } } eventSpyDispatcher.onEvent(result); Set<Artifact> artifacts = new LinkedHashSet<>(); if (result.getDependencyGraph() != null && !result.getDependencyGraph().getChildren().isEmpty()) { RepositoryUtils.toArtifacts( artifacts, result.getDependencyGraph().getChildren(), Collections.singletonList(project.getArtifact().getId()), collectionFilter); } return new SetWithResolutionResult(result, artifacts); } private boolean areAllDependenciesInReactor( Collection<MavenProject> projects, Collection<Dependency> dependencies) { Set<String> projectKeys = getReactorProjectKeys(projects); for (Dependency dependency : dependencies) { org.eclipse.aether.artifact.Artifact a = dependency.getArtifact(); String key = ArtifactUtils.key(a.getGroupId(), a.getArtifactId(), a.getVersion()); if (!projectKeys.contains(key)) { return false; } } return true; } private Set<String> getReactorProjectKeys(Collection<MavenProject> projects) { Set<String> projectKeys = new HashSet<>(projects.size() * 2); for (MavenProject project : projects) { String key = ArtifactUtils.key(project.getGroupId(), project.getArtifactId(), project.getVersion()); projectKeys.add(key); } return projectKeys; } private Collection<String> negate(Collection<String> scopes) { Collection<String> result = new HashSet<>(); Collections.addAll(result, "system", "compile", "provided", "runtime", "test"); for (String scope : scopes) { if ("compile".equals(scope)) { result.remove("compile"); result.remove("system"); 
result.remove("provided"); } else if ("runtime".equals(scope)) { result.remove("compile"); result.remove("runtime"); } else if ("compile+runtime".equals(scope)) { result.remove("compile"); result.remove("system"); result.remove("provided"); result.remove("runtime"); } else if ("runtime+system".equals(scope)) { result.remove("compile"); result.remove("system"); result.remove("runtime"); } else if ("test".equals(scope)) { result.clear(); } } return result; } private static
LifecycleDependencyResolver
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NettyEndpointBuilderFactory.java
{ "start": 69726, "end": 95284 }
interface ____ extends EndpointProducerBuilder { default AdvancedNettyEndpointProducerBuilder advanced() { return (AdvancedNettyEndpointProducerBuilder) this; } /** * Whether or not to disconnect(close) from Netty Channel right after * use. * * The option is a: <code>boolean</code> type. * * Default: false * Group: common * * @param disconnect the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder disconnect(boolean disconnect) { doSetProperty("disconnect", disconnect); return this; } /** * Whether or not to disconnect(close) from Netty Channel right after * use. * * The option will be converted to a <code>boolean</code> type. * * Default: false * Group: common * * @param disconnect the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder disconnect(String disconnect) { doSetProperty("disconnect", disconnect); return this; } /** * Setting to ensure socket is not closed due to inactivity. * * The option is a: <code>boolean</code> type. * * Default: true * Group: common * * @param keepAlive the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder keepAlive(boolean keepAlive) { doSetProperty("keepAlive", keepAlive); return this; } /** * Setting to ensure socket is not closed due to inactivity. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: common * * @param keepAlive the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder keepAlive(String keepAlive) { doSetProperty("keepAlive", keepAlive); return this; } /** * Setting to facilitate socket multiplexing. * * The option is a: <code>boolean</code> type. * * Default: true * Group: common * * @param reuseAddress the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder reuseAddress(boolean reuseAddress) { doSetProperty("reuseAddress", reuseAddress); return this; } /** * Setting to facilitate socket multiplexing. 
* * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: common * * @param reuseAddress the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder reuseAddress(String reuseAddress) { doSetProperty("reuseAddress", reuseAddress); return this; } /** * This option allows producers and consumers (in client mode) to reuse * the same Netty Channel for the lifecycle of processing the Exchange. * This is useful if you need to call a server multiple times in a Camel * route and want to use the same network connection. When using this, * the channel is not returned to the connection pool until the Exchange * is done; or disconnected if the disconnect option is set to true. The * reused Channel is stored on the Exchange as an exchange property with * the key CamelNettyChannel which allows you to obtain the channel * during routing and use it as well. * * The option is a: <code>boolean</code> type. * * Default: false * Group: common * * @param reuseChannel the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder reuseChannel(boolean reuseChannel) { doSetProperty("reuseChannel", reuseChannel); return this; } /** * This option allows producers and consumers (in client mode) to reuse * the same Netty Channel for the lifecycle of processing the Exchange. * This is useful if you need to call a server multiple times in a Camel * route and want to use the same network connection. When using this, * the channel is not returned to the connection pool until the Exchange * is done; or disconnected if the disconnect option is set to true. The * reused Channel is stored on the Exchange as an exchange property with * the key CamelNettyChannel which allows you to obtain the channel * during routing and use it as well. * * The option will be converted to a <code>boolean</code> type. 
* * Default: false * Group: common * * @param reuseChannel the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder reuseChannel(String reuseChannel) { doSetProperty("reuseChannel", reuseChannel); return this; } /** * Setting to set endpoint as one-way (false) or request-response * (true). * * The option is a: <code>boolean</code> type. * * Default: true * Group: common * * @param sync the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sync(boolean sync) { doSetProperty("sync", sync); return this; } /** * Setting to set endpoint as one-way (false) or request-response * (true). * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: common * * @param sync the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sync(String sync) { doSetProperty("sync", sync); return this; } /** * Setting to improve TCP protocol performance. * * The option is a: <code>boolean</code> type. * * Default: true * Group: common * * @param tcpNoDelay the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder tcpNoDelay(boolean tcpNoDelay) { doSetProperty("tcpNoDelay", tcpNoDelay); return this; } /** * Setting to improve TCP protocol performance. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: common * * @param tcpNoDelay the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder tcpNoDelay(String tcpNoDelay) { doSetProperty("tcpNoDelay", tcpNoDelay); return this; } /** * Time to wait for a socket connection to be available. Value is in * milliseconds. * * The option is a: <code>int</code> type. 
* * Default: 10000 * Group: producer * * @param connectTimeout the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder connectTimeout(int connectTimeout) { doSetProperty("connectTimeout", connectTimeout); return this; } /** * Time to wait for a socket connection to be available. Value is in * milliseconds. * * The option will be converted to a <code>int</code> type. * * Default: 10000 * Group: producer * * @param connectTimeout the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder connectTimeout(String connectTimeout) { doSetProperty("connectTimeout", connectTimeout); return this; } /** * Allows to use a timeout for the Netty producer when calling a remote * server. By default no timeout is in use. The value is in milli * seconds, so eg 30000 is 30 seconds. The requestTimeout is using * Netty's ReadTimeoutHandler to trigger the timeout. * * The option is a: <code>long</code> type. * * Group: producer * * @param requestTimeout the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder requestTimeout(long requestTimeout) { doSetProperty("requestTimeout", requestTimeout); return this; } /** * Allows to use a timeout for the Netty producer when calling a remote * server. By default no timeout is in use. The value is in milli * seconds, so eg 30000 is 30 seconds. The requestTimeout is using * Netty's ReadTimeoutHandler to trigger the timeout. * * The option will be converted to a <code>long</code> type. * * Group: producer * * @param requestTimeout the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder requestTimeout(String requestTimeout) { doSetProperty("requestTimeout", requestTimeout); return this; } /** * The netty component installs a default codec if both, encoder/decoder * is null and textline is false. Setting allowDefaultCodec to false * prevents the netty component from installing a default codec as the * first element in the filter chain. 
* * The option is a: <code>boolean</code> type. * * Default: true * Group: codec * * @param allowDefaultCodec the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder allowDefaultCodec(boolean allowDefaultCodec) { doSetProperty("allowDefaultCodec", allowDefaultCodec); return this; } /** * The netty component installs a default codec if both, encoder/decoder * is null and textline is false. Setting allowDefaultCodec to false * prevents the netty component from installing a default codec as the * first element in the filter chain. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: codec * * @param allowDefaultCodec the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder allowDefaultCodec(String allowDefaultCodec) { doSetProperty("allowDefaultCodec", allowDefaultCodec); return this; } /** * Whether or not to auto append missing end delimiter when sending * using the textline codec. * * The option is a: <code>boolean</code> type. * * Default: true * Group: codec * * @param autoAppendDelimiter the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder autoAppendDelimiter(boolean autoAppendDelimiter) { doSetProperty("autoAppendDelimiter", autoAppendDelimiter); return this; } /** * Whether or not to auto append missing end delimiter when sending * using the textline codec. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: codec * * @param autoAppendDelimiter the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder autoAppendDelimiter(String autoAppendDelimiter) { doSetProperty("autoAppendDelimiter", autoAppendDelimiter); return this; } /** * The max line length to use for the textline codec. * * The option is a: <code>int</code> type. 
* * Default: 1024 * Group: codec * * @param decoderMaxLineLength the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder decoderMaxLineLength(int decoderMaxLineLength) { doSetProperty("decoderMaxLineLength", decoderMaxLineLength); return this; } /** * The max line length to use for the textline codec. * * The option will be converted to a <code>int</code> type. * * Default: 1024 * Group: codec * * @param decoderMaxLineLength the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder decoderMaxLineLength(String decoderMaxLineLength) { doSetProperty("decoderMaxLineLength", decoderMaxLineLength); return this; } /** * A list of decoders to be used. You can use a String which have values * separated by comma, and have the values be looked up in the Registry. * Just remember to prefix the value with # so Camel knows it should * lookup. * * The option is a: <code>java.lang.String</code> type. * * Group: codec * * @param decoders the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder decoders(String decoders) { doSetProperty("decoders", decoders); return this; } /** * The delimiter to use for the textline codec. Possible values are LINE * and NULL. * * The option is a: * <code>org.apache.camel.component.netty.TextLineDelimiter</code> type. * * Default: LINE * Group: codec * * @param delimiter the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder delimiter(org.apache.camel.component.netty.TextLineDelimiter delimiter) { doSetProperty("delimiter", delimiter); return this; } /** * The delimiter to use for the textline codec. Possible values are LINE * and NULL. * * The option will be converted to a * <code>org.apache.camel.component.netty.TextLineDelimiter</code> type. 
* * Default: LINE * Group: codec * * @param delimiter the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder delimiter(String delimiter) { doSetProperty("delimiter", delimiter); return this; } /** * A list of encoders to be used. You can use a String which have values * separated by comma, and have the values be looked up in the Registry. * Just remember to prefix the value with # so Camel knows it should * lookup. * * The option is a: <code>java.lang.String</code> type. * * Group: codec * * @param encoders the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder encoders(String encoders) { doSetProperty("encoders", encoders); return this; } /** * The encoding (a charset name) to use for the textline codec. If not * provided, Camel will use the JVM default Charset. * * The option is a: <code>java.lang.String</code> type. * * Group: codec * * @param encoding the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder encoding(String encoding) { doSetProperty("encoding", encoding); return this; } /** * Only used for TCP. If no codec is specified, you can use this flag to * indicate a text line based codec; if not specified or the value is * false, then Object Serialization is assumed over TCP - however only * Strings are allowed to be serialized by default. * * The option is a: <code>boolean</code> type. * * Default: false * Group: codec * * @param textline the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder textline(boolean textline) { doSetProperty("textline", textline); return this; } /** * Only used for TCP. If no codec is specified, you can use this flag to * indicate a text line based codec; if not specified or the value is * false, then Object Serialization is assumed over TCP - however only * Strings are allowed to be serialized by default. * * The option will be converted to a <code>boolean</code> type. 
* * Default: false * Group: codec * * @param textline the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder textline(String textline) { doSetProperty("textline", textline); return this; } /** * Which protocols to enable when using SSL. * * The option is a: <code>java.lang.String</code> type. * * Default: TLSv1.2,TLSv1.3 * Group: security * * @param enabledProtocols the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder enabledProtocols(String enabledProtocols) { doSetProperty("enabledProtocols", enabledProtocols); return this; } /** * To enable/disable hostname verification on SSLEngine. * * The option is a: <code>boolean</code> type. * * Default: false * Group: security * * @param hostnameVerification the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder hostnameVerification(boolean hostnameVerification) { doSetProperty("hostnameVerification", hostnameVerification); return this; } /** * To enable/disable hostname verification on SSLEngine. * * The option will be converted to a <code>boolean</code> type. * * Default: false * Group: security * * @param hostnameVerification the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder hostnameVerification(String hostnameVerification) { doSetProperty("hostnameVerification", hostnameVerification); return this; } /** * Keystore format to be used for payload encryption. Defaults to JKS if * not set. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param keyStoreFormat the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder keyStoreFormat(String keyStoreFormat) { doSetProperty("keyStoreFormat", keyStoreFormat); return this; } /** * Client side certificate keystore to be used for encryption. Is loaded * by default from classpath, but you can prefix with classpath:, file:, * or http: to load the resource from different systems. 
* * This option can also be loaded from an existing file, by prefixing * with file: or classpath: followed by the location of the file. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param keyStoreResource the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder keyStoreResource(String keyStoreResource) { doSetProperty("keyStoreResource", keyStoreResource); return this; } /** * Password to use for the keyStore and trustStore. The same password * must be configured for both resources. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param passphrase the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder passphrase(String passphrase) { doSetProperty("passphrase", passphrase); return this; } /** * Security provider to be used for payload encryption. Defaults to * SunX509 if not set. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param securityProvider the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder securityProvider(String securityProvider) { doSetProperty("securityProvider", securityProvider); return this; } /** * Setting to specify whether SSL encryption is applied to this * endpoint. * * The option is a: <code>boolean</code> type. * * Default: false * Group: security * * @param ssl the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder ssl(boolean ssl) { doSetProperty("ssl", ssl); return this; } /** * Setting to specify whether SSL encryption is applied to this * endpoint. * * The option will be converted to a <code>boolean</code> type. 
* * Default: false * Group: security * * @param ssl the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder ssl(String ssl) { doSetProperty("ssl", ssl); return this; } /** * When enabled and in SSL mode, then the Netty consumer will enrich the * Camel Message with headers having information about the client * certificate such as subject name, issuer name, serial number, and the * valid date range. * * The option is a: <code>boolean</code> type. * * Default: false * Group: security * * @param sslClientCertHeaders the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sslClientCertHeaders(boolean sslClientCertHeaders) { doSetProperty("sslClientCertHeaders", sslClientCertHeaders); return this; } /** * When enabled and in SSL mode, then the Netty consumer will enrich the * Camel Message with headers having information about the client * certificate such as subject name, issuer name, serial number, and the * valid date range. * * The option will be converted to a <code>boolean</code> type. * * Default: false * Group: security * * @param sslClientCertHeaders the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sslClientCertHeaders(String sslClientCertHeaders) { doSetProperty("sslClientCertHeaders", sslClientCertHeaders); return this; } /** * To configure security using SSLContextParameters. * * The option is a: * <code>org.apache.camel.support.jsse.SSLContextParameters</code> type. * * Group: security * * @param sslContextParameters the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) { doSetProperty("sslContextParameters", sslContextParameters); return this; } /** * To configure security using SSLContextParameters. * * The option will be converted to a * <code>org.apache.camel.support.jsse.SSLContextParameters</code> type. 
* * Group: security * * @param sslContextParameters the value to set * @return the dsl builder */ default NettyEndpointProducerBuilder sslContextParameters(String sslContextParameters) { doSetProperty("sslContextParameters", sslContextParameters); return this; } /** * Reference to a
NettyEndpointProducerBuilder
java
mockito__mockito
mockito-core/src/main/java/org/mockito/internal/junit/MockitoTestListener.java
{ "start": 504, "end": 615 }
interface ____ extends MockCreationListener { void testFinished(TestFinishedEvent event); }
MockitoTestListener
java
apache__flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/DecodingFormat.java
{ "start": 4860, "end": 6922 }
interface ____<I> extends Format { /** * Creates runtime decoder implementation that is configured to produce data of the given data * type. * * @param context the context provides several utilities required to instantiate the runtime * decoder implementation of the format * @param physicalDataType For more details check the documentation of {@link DecodingFormat}. */ I createRuntimeDecoder(DynamicTableSource.Context context, DataType physicalDataType); /** * Returns the map of metadata keys and their corresponding data types that can be produced by * this format for reading. By default, this method returns an empty map. * * <p>Metadata columns add additional columns to the table's schema. A decoding format is * responsible to add requested metadata columns at the end of produced rows. * * <p>See {@link SupportsReadingMetadata} for more information. * * <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link * SupportsReadingMetadata} and calls this method in {@link * SupportsReadingMetadata#listReadableMetadata()}. */ default Map<String, DataType> listReadableMetadata() { return Collections.emptyMap(); } /** * Provides a list of metadata keys that the produced row must contain as appended metadata * columns. By default, this method throws an exception if metadata keys are defined. * * <p>See {@link SupportsReadingMetadata} for more information. * * <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link * SupportsReadingMetadata} and calls this method in {@link * SupportsReadingMetadata#applyReadableMetadata(List, DataType)}. */ @SuppressWarnings("unused") default void applyReadableMetadata(List<String> metadataKeys) { throw new UnsupportedOperationException( "A decoding format must override this method to apply metadata keys."); } }
DecodingFormat
java
elastic__elasticsearch
x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinder.java
{ "start": 1240, "end": 8911 }
class ____ implements TextStructureFinder { private final List<String> sampleMessages; private final TextStructure structure; static XmlTextStructureFinder makeXmlTextStructureFinder( List<String> explanation, String sample, String charsetName, Boolean hasByteOrderMarker, TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) throws IOException, ParserConfigurationException, SAXException { String messagePrefix; try (Scanner scanner = new Scanner(sample)) { messagePrefix = scanner.next(); } DocumentBuilderFactory docBuilderFactory = makeDocBuilderFactory(); List<String> sampleMessages = new ArrayList<>(); List<Map<String, ?>> sampleRecords = new ArrayList<>(); String[] sampleDocEnds = sample.split(Pattern.quote(messagePrefix)); StringBuilder preamble = new StringBuilder(sampleDocEnds[0]); int linesConsumed = numNewlinesIn(sampleDocEnds[0]); for (int i = 1; i < sampleDocEnds.length; ++i) { String sampleDoc = messagePrefix + sampleDocEnds[i]; if (i < 3) { preamble.append(sampleDoc); } DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder(); try (InputStream is = new ByteArrayInputStream(sampleDoc.getBytes(StandardCharsets.UTF_8))) { sampleRecords.add(docToMap(docBuilder.parse(is))); sampleMessages.add(sampleDoc); linesConsumed += numNewlinesIn(sampleDoc); timeoutChecker.check("XML parsing"); } catch (SAXException e) { // Tolerate an incomplete last record as long as we have one complete record if (sampleRecords.isEmpty() || i < sampleDocEnds.length - 1) { throw e; } } } if (sample.endsWith("\n") == false) { ++linesConsumed; } // null to allow GC before timestamp search sampleDocEnds = null; // If we get here the XML parser should have confirmed this assert messagePrefix.charAt(0) == '<'; String topLevelTag = messagePrefix.substring(1); TextStructure.Builder structureBuilder = new TextStructure.Builder(TextStructure.Format.XML).setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble.toString()) 
.setNumLinesAnalyzed(linesConsumed) .setNumMessagesAnalyzed(sampleRecords.size()) .setMultilineStartPattern("^\\s*<" + topLevelTag); Tuple<String, TimestampFormatFinder> timeField = TextStructureUtils.guessTimestampField( explanation, sampleRecords, overrides, timeoutChecker ); if (timeField != null) { boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) .setEcsCompatibility(overrides.getEcsCompatibility()) .setIngestPipeline( TextStructureUtils.makeIngestPipelineDefinition( null, Collections.emptyMap(), null, Collections.emptyMap(), topLevelTag + "." + timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone, timeField.v2().needNanosecondPrecision(), overrides.getEcsCompatibility() ) ); } Tuple<SortedMap<String, Object>, SortedMap<String, FieldStats>> mappingsAndFieldStats = TextStructureUtils .guessMappingsAndCalculateFieldStats(explanation, sampleRecords, timeoutChecker, overrides.getTimestampFormat()); if (mappingsAndFieldStats.v2() != null) { structureBuilder.setFieldStats(mappingsAndFieldStats.v2()); } Map<String, Object> innerFieldMappings = mappingsAndFieldStats.v1(); Map<String, Object> secondLevelProperties = new LinkedHashMap<>(); secondLevelProperties.put(TextStructureUtils.MAPPING_TYPE_SETTING, "object"); secondLevelProperties.put(TextStructureUtils.MAPPING_PROPERTIES_SETTING, innerFieldMappings); SortedMap<String, Object> outerFieldMappings = new TreeMap<>(); outerFieldMappings.put(topLevelTag, secondLevelProperties); if (timeField != null) { outerFieldMappings.put(TextStructureUtils.DEFAULT_TIMESTAMP_FIELD, timeField.v2().getEsDateMappingTypeWithoutFormat()); } TextStructure structure = structureBuilder.setMappings( 
Collections.singletonMap(TextStructureUtils.MAPPING_PROPERTIES_SETTING, outerFieldMappings) ).setExplanation(explanation).build(); return new XmlTextStructureFinder(sampleMessages, structure); } private static DocumentBuilderFactory makeDocBuilderFactory() throws ParserConfigurationException { DocumentBuilderFactory docBuilderFactory = XmlUtils.getHardenedBuilderFactory(); docBuilderFactory.setNamespaceAware(false); docBuilderFactory.setValidating(false); return docBuilderFactory; } private XmlTextStructureFinder(List<String> sampleMessages, TextStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } @Override public List<String> getSampleMessages() { return sampleMessages; } @Override public TextStructure getStructure() { return structure; } private static int numNewlinesIn(String str) { return (int) str.chars().filter(c -> c == '\n').count(); } private static Map<String, Object> docToMap(Document doc) { Map<String, Object> docAsMap = new LinkedHashMap<>(); doc.getDocumentElement().normalize(); addNodeToMap(doc.getDocumentElement(), docAsMap); return docAsMap; } private static void addNodeToMap(Node node, Map<String, Object> nodeAsMap) { NamedNodeMap attributes = node.getAttributes(); for (int i = 0; i < attributes.getLength(); ++i) { Node attribute = attributes.item(i); nodeAsMap.put(attribute.getNodeName(), attribute.getNodeValue()); } NodeList children = node.getChildNodes(); for (int i = 0; i < children.getLength(); ++i) { Node child = children.item(i); if (child.getNodeType() == Node.ELEMENT_NODE) { if (child.getChildNodes().getLength() == 1) { Node grandChild = child.getChildNodes().item(0); String value = grandChild.getNodeValue().trim(); if (value.isEmpty() == false) { nodeAsMap.put(child.getNodeName(), value); } } else { Map<String, Object> childNodeAsMap = new LinkedHashMap<>(); addNodeToMap(child, childNodeAsMap); if (childNodeAsMap.isEmpty() == false) { nodeAsMap.put(child.getNodeName(), 
childNodeAsMap); } } } } } }
XmlTextStructureFinder
java
google__error-prone
check_api/src/main/java/com/google/errorprone/matchers/Matchers.java
{ "start": 31516, "end": 33942 }
class ____ of the annotation (e.g. * "javax.annotation.Nullable", or "some.package.OuterClassName$InnerClassName") */ public static Matcher<MethodTree> hasAnnotationOnAnyOverriddenMethod(String annotationClass) { return (tree, state) -> { MethodSymbol methodSym = getSymbol(tree); if (methodSym == null) { return false; } if (ASTHelpers.hasAnnotation(methodSym, annotationClass, state)) { return true; } for (MethodSymbol method : ASTHelpers.findSuperMethods(methodSym, state.getTypes())) { if (ASTHelpers.hasAnnotation(method, annotationClass, state)) { return true; } } return false; }; } /** Matches a method invocation that is known to never return null. */ public static Matcher<ExpressionTree> methodReturnsNonNull() { return anyOf( instanceMethod().onDescendantOf("java.lang.Object").named("toString"), instanceMethod().onExactClass("java.lang.String"), staticMethod().onClass("java.lang.String"), instanceMethod().onExactClass("java.util.StringTokenizer").named("nextToken")); } public static Matcher<MethodTree> methodReturns(Matcher<? super Tree> returnTypeMatcher) { return (methodTree, state) -> { Tree returnTree = methodTree.getReturnType(); // Constructors have no return type. return returnTree != null && returnTypeMatcher.matches(returnTree, state); }; } public static Matcher<MethodTree> methodReturns(Supplier<Type> returnType) { return methodReturns(isSameType(returnType)); } /** Match a method that returns a non-primitive type. */ public static Matcher<MethodTree> methodReturnsNonPrimitiveType() { return methodReturns(not(isPrimitiveOrVoidType())); } /** * Match a method declaration with a specific name. * * @param methodName The name of the method to match, e.g., "equals" */ public static Matcher<MethodTree> methodIsNamed(String methodName) { return (methodTree, state) -> methodTree.getName().contentEquals(methodName); } /** * Match a method declaration that starts with a given string. * * @param prefix The prefix. 
*/ public static Matcher<MethodTree> methodNameStartsWith(String prefix) { return (methodTree, state) -> methodTree.getName().toString().startsWith(prefix); } /** * Match a method declaration with a specific enclosing
name
java
elastic__elasticsearch
x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java
{ "start": 2518, "end": 8953 }
class ____ extends ESTestCase { private Settings globalSettings; private ThreadPool threadPool; @ClassRule public static final OpenLdapTestContainer openLdapContainer = new OpenLdapTestContainer(); @Before public void init() { Path caPath = openLdapContainer.getCaCertPath(); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. */ globalSettings = Settings.builder() .put("path.home", createTempDir()) .put("xpack.security.authc.realms.ldap.oldap-test.ssl.certificate_authorities", caPath) .build(); threadPool = new TestThreadPool("LdapUserSearchSessionFactoryTests"); } @After public void shutdown() { terminate(threadPool); } public void testUserSearchWithBindUserOpenLDAP() throws Exception { final boolean useSecureBindPassword = randomBoolean(); String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); final Settings.Builder realmSettings = Settings.builder() .put( LdapTestCase.buildLdapSettings( realmId, new String[] { openLdapContainer.getLdapUrl() }, Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.ONE_LEVEL, null, false ) ) .put(getFullSettingKey(realmId.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase) .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .put( getFullSettingKey(realmId, PoolingSessionFactorySettings.BIND_DN), "uid=blackwidow,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com" ) .put(getFullSettingKey(realmId.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean()) .put(getFullSettingKey(realmId, 
SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), "full"); if (useSecureBindPassword) { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString( getFullSettingKey(realmId, PoolingSessionFactorySettings.SECURE_BIND_PASSWORD), OpenLdapTests.PASSWORD ); realmSettings.setSecureSettings(secureSettings); } else { realmSettings.put(getFullSettingKey(realmId, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), OpenLdapTests.PASSWORD); } final Settings settings = realmSettings.put(globalSettings).put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0).build(); RealmConfig config = new RealmConfig( realmId, settings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings) ); SSLService sslService = new SSLService(TestEnvironment.newEnvironment(settings)); String[] users = new String[] { "cap", "hawkeye", "hulk", "ironman", "thor" }; try (LdapUserSearchSessionFactory sessionFactory = new LdapUserSearchSessionFactory(config, sslService, threadPool)) { for (String user : users) { // auth try (LdapSession ldap = session(sessionFactory, user, new SecureString(OpenLdapTests.PASSWORD))) { assertThat( ldap.userDn(), is( equalTo( new MessageFormat("uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", Locale.ROOT).format( new Object[] { user }, new StringBuffer(), null ).toString() ) ) ); assertThat(groups(ldap), hasItem(containsString("Avengers"))); } // lookup try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { assertThat( ldap.userDn(), is( equalTo( new MessageFormat("uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", Locale.ROOT).format( new Object[] { user }, new StringBuffer(), null ).toString() ) ) ); assertThat(groups(ldap), hasItem(containsString("Avengers"))); } } } if (useSecureBindPassword == false) { assertSettingDeprecationsAndWarnings( new Setting<?>[] { config.getConcreteSetting(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD) } ); } } private 
LdapSession session(SessionFactory factory, String username, SecureString password) { PlainActionFuture<LdapSession> future = new PlainActionFuture<>(); factory.session(username, password, future); return future.actionGet(); } private List<String> groups(LdapSession ldapSession) { Objects.requireNonNull(ldapSession); PlainActionFuture<List<String>> future = new PlainActionFuture<>(); ldapSession.groups(future); return future.actionGet(); } private LdapSession unauthenticatedSession(SessionFactory factory, String username) { PlainActionFuture<LdapSession> future = new PlainActionFuture<>(); factory.unauthenticatedSession(username, future); return future.actionGet(); } }
OpenLdapUserSearchSessionFactoryTests
java
assertj__assertj-core
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/example/custom/CustomAsserts_filter_stacktrace_Test.java
{ "start": 1341, "end": 4004 }
class ____ { static Stream<ThrowingCallable> stacktrace_should_not_include_assertj_elements_nor_elements_coming_from_assertj() { return Stream.of(() -> assertThat(0).isEqualTo(1), () -> assertThat(0).satisfies(x -> assertThat(x).isEqualTo(1))); } @ParameterizedTest @MethodSource void stacktrace_should_not_include_assertj_elements_nor_elements_coming_from_assertj(ThrowingCallable throwingCallable) { // WHEN var assertionError = expectAssertionError(throwingCallable); // THEN StackTraceElement[] stackTrace = assertionError.getStackTrace(); then(stackTrace).noneSatisfy(stackTraceElement -> assertThat(stackTraceElement.toString()).contains("org.assertj.core")); then(stackTrace[0].toString()).contains("CustomAsserts_filter_stacktrace_Test"); } @Test void should_filter_when_custom_assert_fails_with_message() { try { new CustomAssert("").fail(); } catch (AssertionError e) { assertThat(e.getStackTrace()).areNot(elementOf(CustomAssert.class)); } } @Test void should_filter_when_custom_assert_fails_with_overridden_message() { try { new CustomAssert("").overridingErrorMessage("overridden message").fail(); } catch (AssertionError e) { assertThat(e.getStackTrace()).areNot(elementOf(CustomAssert.class)); } } @Test void should_filter_when_custom_assert_throws_assertion_error() { try { new CustomAssert("").throwAnAssertionError(); } catch (AssertionError e) { assertThat(e.getStackTrace()).areNot(elementOf(CustomAssert.class)); } } @Test void should_filter_when_abstract_custom_assert_fails() { try { new CustomAssert("").failInAbstractAssert(); } catch (AssertionError e) { assertThat(e.getStackTrace()).areNot(elementOf(CustomAbstractAssert.class)); } } @Test void should_not_filter_when_global_remove_option_is_disabled() { Assertions.setRemoveAssertJRelatedElementsFromStackTrace(false); try { new CustomAssert("").fail(); } catch (AssertionError e) { assertThat(e.getStackTrace()).areAtLeastOne(elementOf(CustomAssert.class)); } } @BeforeEach @AfterEach void enableStackTraceFiltering() { 
Assertions.setRemoveAssertJRelatedElementsFromStackTrace(true); } private static Condition<StackTraceElement> elementOf(final Class<?> clazz) { return new Condition<StackTraceElement>("StackTraceElement of " + clazz) { @Override public boolean matches(StackTraceElement value) { return value.getClassName().equals(clazz.getName()); } }; } private static
CustomAsserts_filter_stacktrace_Test
java
quarkusio__quarkus
extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/RequestHandlerJandexUtilTest.java
{ "start": 10193, "end": 11691 }
interface ____(DefaultMethodInterface.class.getName(), definition.method().declaringClass().name().toString()); } @Test public void testConcreteMethodPrefersOverDefaultMethod() { RequestHandlerJandexDefinition definition = RequestHandlerJandexUtil .discoverHandlerMethod(ConcreteOverridesDefault.class.getName(), index); assertNotNull(definition); assertEquals("handleRequest", definition.method().name()); assertEquals(Double.class.getName(), definition.inputOutputTypes().inputType().name().toString()); assertEquals(Long.class.getName(), definition.inputOutputTypes().outputType().name().toString()); // Should prefer the concrete implementation over the default method assertEquals(ConcreteOverridesDefault.class.getName(), definition.method().declaringClass().name().toString()); } @Test public void testInheritsConcreteFromParent() { RequestHandlerJandexDefinition definition = RequestHandlerJandexUtil .discoverHandlerMethod(ChildInheritsFromConcrete.class.getName(), index); assertNotNull(definition); assertEquals("handleRequest", definition.method().name()); assertEquals(Integer.class.getName(), definition.inputOutputTypes().inputType().name().toString()); assertEquals(String.class.getName(), definition.inputOutputTypes().outputType().name().toString()); // Should find the concrete method in the parent
assertEquals
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/action/datastreams/GetDataStreamSettingsActionTests.java
{ "start": 1087, "end": 5241 }
class ____ extends ESTestCase { public void testResponseToXContentEmpty() throws IOException { List<GetDataStreamSettingsAction.DataStreamSettingsResponse> responseList = new ArrayList<>(); GetDataStreamSettingsAction.Response response = new GetDataStreamSettingsAction.Response(responseList); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); response.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { try { xcontent.toXContent(builder, EMPTY_PARAMS); } catch (IOException e) { fail(e); } }); Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); assertThat(xContentMap, equalTo(Map.of("data_streams", List.of()))); } } public void testResponseToXContent() throws IOException { Map<String, String> dataStream1Settings = Map.of("setting1", "value1", "setting2", "value2"); Map<String, String> dataStream1EffectiveSettings = Map.of("setting1", "value1", "setting2", "value2", "setting3", "value3"); Map<String, String> dataStream2Settings = Map.of("setting4", "value4", "setting5", "value5"); Map<String, String> dataStream2EffectiveSettings = Map.of("setting4", "value4", "setting5", "value5", "settings6", "value6"); GetDataStreamSettingsAction.DataStreamSettingsResponse dataStreamSettingsResponse1 = new GetDataStreamSettingsAction.DataStreamSettingsResponse( "dataStream1", Settings.builder().loadFromMap(dataStream1Settings).build(), Settings.builder().loadFromMap(dataStream1EffectiveSettings).build() ); GetDataStreamSettingsAction.DataStreamSettingsResponse dataStreamSettingsResponse2 = new GetDataStreamSettingsAction.DataStreamSettingsResponse( "dataStream2", Settings.builder().loadFromMap(dataStream2Settings).build(), Settings.builder().loadFromMap(dataStream2EffectiveSettings).build() ); List<GetDataStreamSettingsAction.DataStreamSettingsResponse> responseList = List.of( dataStreamSettingsResponse1, 
dataStreamSettingsResponse2 ); GetDataStreamSettingsAction.Response response = new GetDataStreamSettingsAction.Response(responseList); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); response.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { try { xcontent.toXContent(builder, EMPTY_PARAMS); } catch (IOException e) { fail(e); } }); Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); assertThat( xContentMap, equalTo( Map.of( "data_streams", List.of( Map.of( "name", "dataStream1", "settings", dataStream1Settings, "effective_settings", dataStream1EffectiveSettings ), Map.of( "name", "dataStream2", "settings", dataStream2Settings, "effective_settings", dataStream2EffectiveSettings ) ) ) ) ); } } }
GetDataStreamSettingsActionTests
java
apache__camel
components/camel-cxf/camel-cxf-rest/src/main/java/org/apache/camel/component/cxf/jaxrs/CxfRsConsumer.java
{ "start": 1613, "end": 3770 }
class ____ extends DefaultConsumer implements Suspendable { private Server server; public CxfRsConsumer(CxfRsEndpoint endpoint, Processor processor) { super(endpoint, processor); } @Override public boolean isHostedService() { return true; } protected Server createServer() { CxfRsEndpoint endpoint = (CxfRsEndpoint) getEndpoint(); CxfRsInvoker cxfRsInvoker = new CxfRsInvoker(endpoint, this); JAXRSServerFactoryBean svrBean = endpoint.createJAXRSServerFactoryBean(); Bus bus = endpoint.getBus(); // We need to apply the bus setting from the CxfRsEndpoint which does not use the default bus if (bus != null) { svrBean.setBus(bus); } svrBean.setInvoker(cxfRsInvoker); // setup the UnitOfWorkCloserInterceptor for OneWayMessageProcessor svrBean.getInInterceptors().add(new UnitOfWorkCloserInterceptor(Phase.POST_INVOKE, true)); // close the UnitOfWork normally svrBean.getOutInterceptors().add(new UnitOfWorkCloserInterceptor()); // close the UnitOfWork in case of Fault svrBean.getOutFaultInterceptors().add(new UnitOfWorkCloserInterceptor()); Server server = svrBean.create(); final MessageObserver originalOutFaultObserver = server.getEndpoint().getOutFaultObserver(); //proxy OutFaultObserver so we can close org.apache.camel.spi.UnitOfWork in case of error server.getEndpoint().setOutFaultObserver(message -> { CxfUtils.closeCamelUnitOfWork(message); originalOutFaultObserver.onMessage(message); }); return server; } @Override protected void doStart() throws Exception { super.doStart(); if (server == null) { server = createServer(); } server.start(); } @Override protected void doStop() throws Exception { if (server != null) { server.stop(); server.destroy(); server = null; } super.doStop(); } public Server getServer() { return server; } }
CxfRsConsumer
java
google__truth
core/src/main/java/com/google/common/truth/IntegerSubject.java
{ "start": 1610, "end": 3080 }
class ____ { private final IntegerComparer comparer; private TolerantIntegerComparison(IntegerComparer comparer) { this.comparer = comparer; } /** * Checks that the actual value is within the tolerance of the given value or <i>not</i> within * the tolerance of the given value, depending on the choice made earlier in the fluent call * chain. The actual value and tolerance are also specified earlier in the fluent call chain. */ public void of(int other) { comparer.compareAgainst(other); } /** * @throws UnsupportedOperationException always * @deprecated {@link Object#equals(Object)} is not supported on TolerantIntegerComparison. If * you meant to compare ints, use {@link #of(int)} instead. */ @Deprecated @Override public boolean equals(@Nullable Object other) { throw new UnsupportedOperationException( "If you meant to compare ints, use .of(int) instead."); } /** * @throws UnsupportedOperationException always * @deprecated {@link Object#hashCode()} is not supported on TolerantIntegerComparison */ @Deprecated @Override public int hashCode() { throw new UnsupportedOperationException("Subject.hashCode() is not supported."); } static TolerantIntegerComparison comparing(IntegerComparer comparer) { return new TolerantIntegerComparison(comparer); } } private
TolerantIntegerComparison
java
spring-projects__spring-framework
spring-webflux/src/main/java/org/springframework/web/reactive/resource/ResourceWebHandler.java
{ "start": 3644, "end": 20511 }
class ____ implements WebHandler, InitializingBean { private static final Set<HttpMethod> SUPPORTED_METHODS = Set.of(HttpMethod.GET, HttpMethod.HEAD); private static final Log logger = LogFactory.getLog(ResourceWebHandler.class); private @Nullable ResourceLoader resourceLoader; private final List<String> locationValues = new ArrayList<>(4); private final List<Resource> locationResources = new ArrayList<>(4); private final List<Resource> locationsToUse = new ArrayList<>(4); private final List<ResourceResolver> resourceResolvers = new ArrayList<>(4); private final List<ResourceTransformer> resourceTransformers = new ArrayList<>(4); private @Nullable ResourceResolverChain resolverChain; private @Nullable ResourceTransformerChain transformerChain; private @Nullable CacheControl cacheControl; private @Nullable ResourceHttpMessageWriter resourceHttpMessageWriter; private @Nullable Map<String, MediaType> mediaTypes; private boolean useLastModified = true; private @Nullable Function<Resource, String> etagGenerator; private boolean optimizeLocations = false; /** * Provide the ResourceLoader to load {@link #setLocationValues location values} with. * @since 5.1 */ public void setResourceLoader(ResourceLoader resourceLoader) { this.resourceLoader = resourceLoader; } /** * Accepts a list of String-based location values to be resolved into * {@link Resource} locations. * @since 5.1 */ public void setLocationValues(List<String> locationValues) { Assert.notNull(locationValues, "Location values list must not be null"); this.locationValues.clear(); this.locationValues.addAll(locationValues); } /** * Return the configured location values. * @since 5.1 */ public List<String> getLocationValues() { return this.locationValues; } /** * Set the {@code List} of {@code Resource} paths to use as sources * for serving static resources. 
*/ public void setLocations(@Nullable List<Resource> locations) { this.locationResources.clear(); if (locations != null) { for (Resource location : locations) { ResourceHandlerUtils.assertResourceLocation(location); this.locationResources.add(location); } } } /** * Return the {@code List} of {@code Resource} paths to use as sources for * serving static resources. * <p>Note that if {@link #setLocationValues(List) locationValues} are provided, * instead of loaded Resource-based locations, this method will return empty * until after initialization via {@link #afterPropertiesSet()}. * <p><strong>Note:</strong> The list of locations may be filtered to exclude * those that don't actually exist and therefore the list returned from this * method may be a subset of all given locations. See {@link #setOptimizeLocations}. * @see #setLocationValues * @see #setLocations */ public List<Resource> getLocations() { if (this.locationsToUse.isEmpty()) { // Possibly not yet initialized, return only what we have so far return this.locationResources; } return this.locationsToUse; } /** * Configure the list of {@link ResourceResolver ResourceResolvers} to use. * <p>By default {@link PathResourceResolver} is configured. If using this property, * it is recommended to add {@link PathResourceResolver} as the last resolver. */ public void setResourceResolvers(@Nullable List<ResourceResolver> resourceResolvers) { this.resourceResolvers.clear(); if (resourceResolvers != null) { this.resourceResolvers.addAll(resourceResolvers); } } /** * Return the list of configured resource resolvers. */ public List<ResourceResolver> getResourceResolvers() { return this.resourceResolvers; } /** * Configure the list of {@link ResourceTransformer ResourceTransformers} to use. * <p>By default no transformers are configured for use. 
*/ public void setResourceTransformers(@Nullable List<ResourceTransformer> resourceTransformers) { this.resourceTransformers.clear(); if (resourceTransformers != null) { this.resourceTransformers.addAll(resourceTransformers); } } /** * Return the list of configured resource transformers. */ public List<ResourceTransformer> getResourceTransformers() { return this.resourceTransformers; } /** * Configure the {@link ResourceHttpMessageWriter} to use. * <p>By default a {@link ResourceHttpMessageWriter} will be configured. */ public void setResourceHttpMessageWriter(@Nullable ResourceHttpMessageWriter httpMessageWriter) { this.resourceHttpMessageWriter = httpMessageWriter; } /** * Return the configured resource message writer. */ public @Nullable ResourceHttpMessageWriter getResourceHttpMessageWriter() { return this.resourceHttpMessageWriter; } /** * Set the {@link org.springframework.http.CacheControl} instance to build * the Cache-Control HTTP response header. */ public void setCacheControl(@Nullable CacheControl cacheControl) { this.cacheControl = cacheControl; } /** * Return the {@link org.springframework.http.CacheControl} instance to build * the Cache-Control HTTP response header. */ public @Nullable CacheControl getCacheControl() { return this.cacheControl; } /** * Set whether we should look at the {@link Resource#lastModified()} * when serving resources and use this information to drive {@code "Last-Modified"} * HTTP response headers. * <p>This option is enabled by default and should be turned off if the metadata of * the static files should be ignored. * @since 5.3 */ public void setUseLastModified(boolean useLastModified) { this.useLastModified = useLastModified; } /** * Return whether the {@link Resource#lastModified()} information is used * to drive HTTP responses when serving static resources. 
* @since 5.3 */ public boolean isUseLastModified() { return this.useLastModified; } /** * Configure a generator function that will be used to create the ETag information, * given a {@link Resource} that is about to be written to the response. * <p>This function should return a String that will be used as an argument in * {@link ServerWebExchange#checkNotModified(String)}, or {@code null} if no value * can be generated for the given resource. * @param etagGenerator the HTTP ETag generator function to use. * @since 6.1 */ public void setEtagGenerator(@Nullable Function<Resource, String> etagGenerator) { this.etagGenerator = etagGenerator; } /** * Return the HTTP ETag generator function to be used when serving resources. * @return the HTTP ETag generator function * @since 6.1 */ public @Nullable Function<Resource, String> getEtagGenerator() { return this.etagGenerator; } /** * Set whether to optimize the specified locations through an existence * check on startup, filtering non-existing directories upfront so that * they do not have to be checked on every resource access. * <p>The default is {@code false}, for defensiveness against zip files * without directory entries which are unable to expose the existence of * a directory upfront. Switch this flag to {@code true} for optimized * access in case of a consistent jar layout with directory entries. * @since 5.3.13 */ public void setOptimizeLocations(boolean optimizeLocations) { this.optimizeLocations = optimizeLocations; } /** * Return whether to optimize the specified locations through an existence * check on startup, filtering non-existing directories upfront so that * they do not have to be checked on every resource access. * @since 5.3.13 */ public boolean isOptimizeLocations() { return this.optimizeLocations; } /** * Add mappings between file extensions extracted from the filename of static * {@link Resource}s and the media types to use for the response. 
* <p>Use of this method is typically not necessary since mappings can be * also determined via {@link MediaTypeFactory#getMediaType(Resource)}. * @param mediaTypes media type mappings * @since 5.3.2 */ public void setMediaTypes(Map<String, MediaType> mediaTypes) { if (this.mediaTypes == null) { this.mediaTypes = new HashMap<>(mediaTypes.size()); } mediaTypes.forEach((ext, type) -> this.mediaTypes.put(ext.toLowerCase(Locale.ROOT), type)); } /** * Return the {@link #setMediaTypes(Map) configured} media type mappings. * @since 5.3.2 */ public Map<String, MediaType> getMediaTypes() { return (this.mediaTypes != null ? this.mediaTypes : Collections.emptyMap()); } @Override public void afterPropertiesSet() throws Exception { resolveResourceLocations(); if (this.resourceResolvers.isEmpty()) { this.resourceResolvers.add(new PathResourceResolver()); } initAllowedLocations(); if (getResourceHttpMessageWriter() == null) { this.resourceHttpMessageWriter = new ResourceHttpMessageWriter(); } // Initialize immutable resolver and transformer chains this.resolverChain = new DefaultResourceResolverChain(this.resourceResolvers); this.transformerChain = new DefaultResourceTransformerChain(this.resolverChain, this.resourceTransformers); } private void resolveResourceLocations() { List<Resource> result = new ArrayList<>(this.locationResources); if (!this.locationValues.isEmpty()) { Assert.notNull(this.resourceLoader, "ResourceLoader is required when \"locationValues\" are configured."); Assert.isTrue(CollectionUtils.isEmpty(this.locationResources), "Please set " + "either Resource-based \"locations\" or String-based \"locationValues\", but not both."); for (String location : this.locationValues) { location = ResourceHandlerUtils.initLocationPath(location); result.add(this.resourceLoader.getResource(location)); } } if (isOptimizeLocations()) { result = result.stream().filter(Resource::exists).toList(); } this.locationsToUse.clear(); this.locationsToUse.addAll(result); } /** * Look for a 
{@code PathResourceResolver} among the configured resource * resolvers and set its {@code allowedLocations} property (if empty) to * match the {@link #setLocations locations} configured on this class. */ protected void initAllowedLocations() { if (CollectionUtils.isEmpty(getLocations())) { return; } for (int i = getResourceResolvers().size() - 1; i >= 0; i--) { if (getResourceResolvers().get(i) instanceof PathResourceResolver resolver) { if (ObjectUtils.isEmpty(resolver.getAllowedLocations())) { resolver.setAllowedLocations(getLocations().toArray(new Resource[0])); } break; } } } /** * Processes a resource request. * <p>Checks for the existence of the requested resource in the configured list of locations. * If the resource does not exist, a {@code 404} response will be returned to the client. * If the resource exists, the request will be checked for the presence of the * {@code Last-Modified} header, and its value will be compared against the last-modified * timestamp of the given resource, returning a {@code 304} status code if the * {@code Last-Modified} value is greater. If the resource is newer than the * {@code Last-Modified} value, or the header is not present, the content resource * of the resource will be written to the response with caching headers * set to expire one year in the future. 
*/ @Override public Mono<Void> handle(ServerWebExchange exchange) { return getResource(exchange) .switchIfEmpty(Mono.defer(() -> { if (logger.isDebugEnabled()) { logger.debug(exchange.getLogPrefix() + "Resource not found"); } return Mono.error(new NoResourceFoundException(exchange.getRequest().getURI(), getResourcePath(exchange))); })) .flatMap(resource -> { try { if (HttpMethod.OPTIONS.equals(exchange.getRequest().getMethod())) { exchange.getResponse().getHeaders().add("Allow", "GET,HEAD,OPTIONS"); return Mono.empty(); } // Supported methods and required session HttpMethod httpMethod = exchange.getRequest().getMethod(); if (!SUPPORTED_METHODS.contains(httpMethod)) { return Mono.error(new MethodNotAllowedException( exchange.getRequest().getMethod(), SUPPORTED_METHODS)); } // Header phase String eTagValue = (getEtagGenerator() != null) ? getEtagGenerator().apply(resource) : null; Instant lastModified = isUseLastModified() ? Instant.ofEpochMilli(resource.lastModified()) : Instant.MIN; if (exchange.checkNotModified(eTagValue, lastModified)) { if (logger.isTraceEnabled()) { logger.trace(exchange.getLogPrefix() + "Resource not modified"); } return Mono.empty(); } // Apply cache settings, if any CacheControl cacheControl = getCacheControl(); if (cacheControl != null) { exchange.getResponse().getHeaders().setCacheControl(cacheControl); } // Check the media type for the resource MediaType mediaType = getMediaType(resource); setHeaders(exchange, resource, mediaType); // Content phase ResourceHttpMessageWriter writer = getResourceHttpMessageWriter(); Assert.state(writer != null, "No ResourceHttpMessageWriter"); if (HttpMethod.HEAD == httpMethod) { return writer.addDefaultHeaders(exchange.getResponse(), resource, mediaType, Hints.from(Hints.LOG_PREFIX_HINT, exchange.getLogPrefix())) .then(exchange.getResponse().setComplete()); } else { return writer.write(Mono.just(resource), null, ResolvableType.forClass(Resource.class), mediaType, exchange.getRequest(), 
exchange.getResponse(), Hints.from(Hints.LOG_PREFIX_HINT, exchange.getLogPrefix())); } } catch (IOException ex) { return Mono.error(ex); } }); } @SuppressWarnings("NullAway") // Lambda protected Mono<Resource> getResource(ServerWebExchange exchange) { String rawPath = getResourcePath(exchange); String path = processPath(rawPath); if (ResourceHandlerUtils.shouldIgnoreInputPath(path) || isInvalidPath(path)) { return Mono.empty(); } Assert.state(this.resolverChain != null, "ResourceResolverChain not initialized"); Assert.state(this.transformerChain != null, "ResourceTransformerChain not initialized"); return this.resolverChain.resolveResource(exchange, path, getLocations()) .flatMap(resource -> this.transformerChain.transform(exchange, resource)); } private String getResourcePath(ServerWebExchange exchange) { PathPattern pattern = exchange.getRequiredAttribute(HandlerMapping.BEST_MATCHING_PATTERN_ATTRIBUTE); if (!pattern.hasPatternSyntax()) { return pattern.getPatternString(); } PathContainer pathWithinHandler = exchange.getRequiredAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE); return pathWithinHandler.value(); } /** * Process the given resource path. * <p>By default, this method delegates to {@link ResourceHandlerUtils#normalizeInputPath}. */ protected String processPath(String path) { return ResourceHandlerUtils.normalizeInputPath(path); } /** * Invoked after {@link ResourceHandlerUtils#isInvalidPath(String)} * to allow subclasses to perform further validation. * <p>By default, this method does not perform any validations. 
*/ protected boolean isInvalidPath(String path) { return false; } private @Nullable MediaType getMediaType(Resource resource) { MediaType mediaType = null; String filename = resource.getFilename(); if (!CollectionUtils.isEmpty(this.mediaTypes)) { String ext = StringUtils.getFilenameExtension(filename); if (ext != null) { mediaType = this.mediaTypes.get(ext.toLowerCase(Locale.ROOT)); } } if (mediaType == null) { List<MediaType> mediaTypes = MediaTypeFactory.getMediaTypes(filename); if (!CollectionUtils.isEmpty(mediaTypes)) { mediaType = mediaTypes.get(0); } } return mediaType; } /** * Set headers on the response. Called for both GET and HEAD requests. * @param exchange current exchange * @param resource the identified resource (never {@code null}) * @param mediaType the resource's media type (never {@code null}) */ protected void setHeaders(ServerWebExchange exchange, Resource resource, @Nullable MediaType mediaType) throws IOException { HttpHeaders headers = exchange.getResponse().getHeaders(); long length = resource.contentLength(); headers.setContentLength(length); if (mediaType != null) { headers.setContentType(mediaType); } if (resource instanceof HttpResource httpResource) { exchange.getResponse().getHeaders().putAll(httpResource.getResponseHeaders()); } } @Override public String toString() { return "ResourceWebHandler " + locationToString(getLocations()); } private String locationToString(List<Resource> locations) { return locations.toString() .replaceAll("
ResourceWebHandler
java
google__guice
core/src/com/google/inject/internal/DefaultConstructionProxyFactory.java
{ "start": 1450, "end": 3160 }
class ____<T> implements ConstructionProxyFactory<T> { private final InjectionPoint injectionPoint; /** * @param injectionPoint an injection point whose member is a constructor of {@code T}. */ DefaultConstructionProxyFactory(InjectionPoint injectionPoint) { this.injectionPoint = injectionPoint; } @Override public ConstructionProxy<T> create() { @SuppressWarnings("unchecked") // the injection point is for a constructor of T final Constructor<T> constructor = (Constructor<T>) injectionPoint.getMember(); if (InternalFlags.getUseMethodHandlesOption()) { MethodHandle target = InternalMethodHandles.unreflectConstructor(constructor); // If construction fails fall through to the fastclass approach which can // access more constructors. See comments in ProviderMethod on how to change // Guice APIs to better support this. if (target != null) { return new MethodHandleProxy<T>(injectionPoint, constructor, target); } } if (InternalFlags.isBytecodeGenEnabled()) { try { BiFunction<Object, Object[], Object> fastConstructor = BytecodeGen.fastConstructor(constructor); if (fastConstructor != null) { return new FastClassProxy<T>(injectionPoint, constructor, fastConstructor); } } catch (Exception | LinkageError e) { /* fall-through */ } } if (!Modifier.isPublic(constructor.getDeclaringClass().getModifiers()) || !Modifier.isPublic(constructor.getModifiers())) { constructor.setAccessible(true); } return new ReflectiveProxy<T>(injectionPoint, constructor); } private abstract static
DefaultConstructionProxyFactory
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SqlEndpointBuilderFactory.java
{ "start": 56483, "end": 58879 }
interface ____ extends EndpointProducerBuilder { default AdvancedSqlEndpointProducerBuilder advanced() { return (AdvancedSqlEndpointProducerBuilder) this; } /** * Whether to allow using named parameters in the queries. * * The option is a: <code>boolean</code> type. * * Default: true * Group: common * * @param allowNamedParameters the value to set * @return the dsl builder */ default SqlEndpointProducerBuilder allowNamedParameters(boolean allowNamedParameters) { doSetProperty("allowNamedParameters", allowNamedParameters); return this; } /** * Whether to allow using named parameters in the queries. * * The option will be converted to a <code>boolean</code> type. * * Default: true * Group: common * * @param allowNamedParameters the value to set * @return the dsl builder */ default SqlEndpointProducerBuilder allowNamedParameters(String allowNamedParameters) { doSetProperty("allowNamedParameters", allowNamedParameters); return this; } /** * Sets the DataSource to use to communicate with the database at * endpoint level. * * The option is a: <code>javax.sql.DataSource</code> type. * * Group: common * * @param dataSource the value to set * @return the dsl builder */ default SqlEndpointProducerBuilder dataSource(javax.sql.DataSource dataSource) { doSetProperty("dataSource", dataSource); return this; } /** * Sets the DataSource to use to communicate with the database at * endpoint level. * * The option will be converted to a <code>javax.sql.DataSource</code> * type. * * Group: common * * @param dataSource the value to set * @return the dsl builder */ default SqlEndpointProducerBuilder dataSource(String dataSource) { doSetProperty("dataSource", dataSource); return this; } /** * Specify the full package and
SqlEndpointProducerBuilder
java
apache__rocketmq
common/src/main/java/org/apache/rocketmq/common/action/RocketMQAction.java
{ "start": 1032, "end": 1162 }
interface ____ { int value(); ResourceType resource() default ResourceType.UNKNOWN; Action[] action(); }
RocketMQAction
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java
{ "start": 1047, "end": 1919 }
class ____ that all json layout fields appear in the first few log lines after startup * Fields available upon process startup: <code>type</code>, <code>timestamp</code>, <code>level</code>, <code>component</code>, * <code>message</code>, <code>node.name</code>, <code>cluster.name</code>. * Whereas <code>node.id</code> and <code>cluster.uuid</code> are available later once the first clusterState has been received. * * * <code>node.name</code>, <code>cluster.name</code>, <code>node.id</code>, <code>cluster.uuid</code> * should not change across all log lines * * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml <strong>and</strong> start * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by <code>LogConfigurator.setNodeName</code>. */ public abstract
verify
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_3360/Vehicle.java
{ "start": 990, "end": 1141 }
class ____ extends Vehicle { public Motorbike(String name, String modelName) { super( name, modelName ); } } }
Motorbike
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/bug/Issue1023.java
{ "start": 396, "end": 2228 }
class ____ extends TestCase { public void test_for_issue() throws Exception { Date now = new Date(); GregorianCalendar gregorianCalendar = (GregorianCalendar) GregorianCalendar.getInstance(); gregorianCalendar.setTime(now); XMLGregorianCalendar calendar = DatatypeFactory.newInstance().newXMLGregorianCalendar(gregorianCalendar); String jsonString = JSON.toJSONString(calendar); // success calendar = JSON.parseObject(jsonString, XMLGregorianCalendar.class); Object toJSON1 = JSON.toJSON(calendar); // debug看到是 Long 类型 // 所以这里会报错: // error: java.lang.ClassCastException: java.lang.Long cannot be cast to com.alibaba.fastjson.JSONObject //JSONObject jsonObject = (JSONObject) JSON.toJSON(calendar); // 所以 这里肯定会报错, 因为 jsonObject 不是JSONObject类型 //calendar = jsonObject.toJavaObject(XMLGregorianCalendar.class); List<XMLGregorianCalendar> calendarList = new ArrayList<XMLGregorianCalendar>(); calendarList.add(calendar); calendarList.add(calendar); calendarList.add(calendar); Object toJSON2 = JSON.toJSON(calendarList); // debug 看到是 JSONArray 类型 // success: 通过 JSONArray.parseArray 方法可以正确转换 JSONArray jsonArray = (JSONArray) JSON.toJSON(calendarList); jsonString = jsonArray.toJSONString(); List<XMLGregorianCalendar> calendarList1 = JSONArray.parseArray(jsonString, XMLGregorianCalendar.class); // 通过 jsonArray.toJavaList 无法转换 // error: com.alibaba.fastjson.JSONException: can not cast to : javax.xml.datatype.XMLGregorianCalendar List<XMLGregorianCalendar> calendarList2 = jsonArray.toJavaList(XMLGregorianCalendar.class); assertNotNull(calendarList2); assertEquals(3, calendarList2.size()); } }
Issue1023
java
spring-projects__spring-security
config/src/test/java/org/springframework/security/config/annotation/web/AbstractConfiguredSecurityBuilderTests.java
{ "start": 9144, "end": 9278 }
class ____ extends SecurityConfigurerAdapter<Object, TestConfiguredSecurityBuilder> { } private static final
TestSecurityConfigurer
java
apache__flink
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/UnsupportedNodesInPlanTest.java
{ "start": 1271, "end": 1841 }
class ____ extends TableTestBase { @Test void testInvalidType() { final TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode()); assertThatThrownBy( () -> tEnv.loadPlan( PlanReference.fromResource( "/jsonplan/testInvalidTypeJsonPlan.json"))) .hasRootCauseMessage("Unsupported exec node type: 'null_null'."); } }
UnsupportedNodesInPlanTest
java
junit-team__junit5
junit-platform-launcher/src/main/java/org/junit/platform/launcher/jfr/FlightRecordingExecutionListener.java
{ "start": 4009, "end": 4197 }
class ____ extends ExecutionEvent { @Label("Contains Tests") boolean containsTests; @Label("Engine Names") @Nullable String engineNames; } abstract static
TestPlanExecutionEvent
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/processor/MDCOnCompletionOnCompletionTest.java
{ "start": 1292, "end": 3025 }
class ____ extends ContextTestSupport { private static final Logger LOG = LoggerFactory.getLogger(MDCOnCompletionOnCompletionTest.class); @Test public void testMDC() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMinimumMessageCount(1); assertMockEndpointsSatisfied(); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { // enable MDC context.setUseMDCLogging(true); from("timer:foo?period=5000").routeId("route-a").setBody().constant("Hello World").onCompletion() .process(new Processor() { @Override public void process(Exchange exchange) { exchange.getExchangeExtension().addOnCompletion(new MyOnCompletion()); } }).end().to("log:foo").to("direct:b"); from("direct:b").routeId("route-b").process(new Processor() { public void process(Exchange exchange) { assertEquals("route-b", MDC.get("camel.routeId")); assertEquals(exchange.getExchangeId(), MDC.get("camel.exchangeId")); assertEquals(exchange.getIn().getMessageId(), MDC.get("camel.messageId")); MDC.put("custom.id", "1"); LOG.info("From processor in route-b"); } }).to("log:bar").to("mock:result"); } }; } private static
MDCOnCompletionOnCompletionTest