index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/dataset/DatasetsFinderFilteringDecoratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Properties;
import lombok.AllArgsConstructor;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.util.function.CheckedExceptionPredicate;
import org.apache.hadoop.fs.FileSystem;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@code DatasetsFinderFilteringDecorator}: a decorator that filters the datasets
 * returned by an underlying {@link DatasetsFinder} through configurable lists of allow and
 * deny predicates.
 */
public class DatasetsFinderFilteringDecoratorTest {

  /**
   * Builds a mock {@link DatasetsFinder} whose {@code findDatasets()} returns exactly the
   * given dataset. Centralizes the mock setup previously duplicated in every test.
   */
  @SuppressWarnings("unchecked")
  private static DatasetsFinder<Dataset> finderOf(Dataset dataset) throws IOException {
    DatasetsFinder<Dataset> mockFinder = Mockito.mock(DatasetsFinder.class);
    Mockito.when(mockFinder.findDatasets()).thenReturn(Collections.singletonList(dataset));
    return mockFinder;
  }

  /** With no allow/deny predicates configured, every dataset from the delegate passes through. */
  @Test
  public void testFindDatasets_emptyAllowed() throws IOException {
    Dataset mockDataset = Mockito.mock(Dataset.class);
    DatasetsFinderFilteringDecorator<Dataset> decorator = new DatasetsFinderFilteringDecorator<>(
        finderOf(mockDataset), Collections.emptyList(), Collections.emptyList());
    Assert.assertEquals(decorator.findDatasets(), Collections.singletonList(mockDataset));
  }

  /** A dataset is kept when every allow predicate accepts it and every deny predicate rejects it. */
  @Test
  public void testFindDatasets_allowed() throws IOException {
    Dataset mockDataset = Mockito.mock(Dataset.class);
    DatasetsFinderFilteringDecorator<Dataset> decorator = new DatasetsFinderFilteringDecorator<>(
        finderOf(mockDataset),
        Arrays.asList(new StubTrue(), new StubTrue()),
        Arrays.asList(new StubFalse(), new StubFalse()));
    Assert.assertEquals(decorator.findDatasets(), Collections.singletonList(mockDataset));
  }

  /** A dataset is dropped when any allow predicate fails or any deny predicate matches. */
  @Test
  public void testFindDatasets_denied() throws IOException {
    Dataset mockDataset = Mockito.mock(Dataset.class);
    DatasetsFinder<Dataset> mockFinder = finderOf(mockDataset);
    // One failing allow predicate is enough to filter the dataset out.
    DatasetsFinderFilteringDecorator<Dataset> decorator = new DatasetsFinderFilteringDecorator<>(mockFinder,
        Arrays.asList(new StubTrue(), new StubFalse()),
        Arrays.asList(new StubFalse()));
    Assert.assertEquals(decorator.findDatasets(), Collections.emptyList());
    // One matching deny predicate is likewise enough to filter the dataset out.
    decorator = new DatasetsFinderFilteringDecorator<>(mockFinder,
        Arrays.asList(new StubTrue()),
        Arrays.asList(new StubFalse(), new StubTrue()));
    Assert.assertEquals(decorator.findDatasets(), Collections.emptyList());
  }

  /** An {@link IOException} thrown by any predicate (allow or deny) propagates to the caller. */
  @Test
  public void testFindDatasets_throwsException() throws IOException {
    Dataset mockDataset = Mockito.mock(Dataset.class);
    DatasetsFinder<Dataset> mockFinder = finderOf(mockDataset);
    DatasetsFinderFilteringDecorator<Dataset> datasetFinder_1 = new DatasetsFinderFilteringDecorator<>(mockFinder,
        Arrays.asList(new StubTrue(), new ThrowsException()),
        Arrays.asList(new StubFalse()));
    Assert.assertThrows(IOException.class, datasetFinder_1::findDatasets);
    DatasetsFinderFilteringDecorator<Dataset> datasetFinder_2 = new DatasetsFinderFilteringDecorator<>(mockFinder,
        Arrays.asList(new StubTrue()),
        Arrays.asList(new StubFalse(), new ThrowsException()));
    Assert.assertThrows(IOException.class, datasetFinder_2::findDatasets);
  }

  /**
   * Verifies that the decorator instantiates the configured finder and predicate classes
   * reflectively, passing the job {@link Properties} into each predicate's constructor.
   */
  @Test
  public void testInstantiationOfPredicatesAndDatasetFinder() throws IOException {
    DatasetsFinder<Dataset> mockFinder = Mockito.mock(DatasetsFinder.class);
    Properties props = new Properties();
    props.setProperty(DatasetsFinderFilteringDecorator.DATASET_CLASS, mockFinder.getClass().getName());
    props.setProperty(DatasetsFinderFilteringDecorator.ALLOWED, StubTrue.class.getName());
    props.setProperty(DatasetsFinderFilteringDecorator.DENIED, StubFalse.class.getName());
    DatasetsFinderFilteringDecorator<Dataset>
        testFilterDataFinder = new TestDatasetsFinderFilteringDecorator(Mockito.mock(FileSystem.class), props);
    Assert.assertEquals(testFilterDataFinder.datasetFinder.getClass(), mockFinder.getClass());
    Assert.assertEquals(testFilterDataFinder.allowDatasetPredicates.size(), 1);
    CheckedExceptionPredicate<Dataset, IOException> allowListPredicate = testFilterDataFinder.allowDatasetPredicates.get(0);
    Assert.assertEquals(allowListPredicate.getClass(), StubTrue.class);
    // The Properties-arg constructor (generated by @AllArgsConstructor) must have been used.
    Assert.assertEquals(((StubTrue) allowListPredicate).props, props);
    Assert.assertEquals(testFilterDataFinder.denyDatasetPredicates.size(), 1);
    CheckedExceptionPredicate<Dataset, IOException> denyListPredicate = testFilterDataFinder.denyDatasetPredicates.get(0);
    Assert.assertEquals(denyListPredicate.getClass(), StubFalse.class);
    Assert.assertEquals(((StubFalse) denyListPredicate).props, props);
  }

  /** Exposes the protected {@code (FileSystem, Properties)} constructor for testing. */
  static class TestDatasetsFinderFilteringDecorator extends DatasetsFinderFilteringDecorator<Dataset> {
    public TestDatasetsFinderFilteringDecorator(FileSystem fs, Properties properties) throws IOException {
      super(fs, properties);
    }
  }

  /** Predicate stub that always accepts; records the Properties it was constructed with. */
  @AllArgsConstructor
  static class StubTrue implements CheckedExceptionPredicate<Dataset, IOException> {
    Properties props;
    StubTrue() {
      this.props = null;
    }
    @Override
    public boolean test(Dataset arg) throws IOException {
      return true;
    }
  }

  /** Predicate stub that always rejects; records the Properties it was constructed with. */
  @AllArgsConstructor
  static class StubFalse implements CheckedExceptionPredicate<Dataset, IOException> {
    Properties props;
    StubFalse() {
      this.props = null;
    }
    @Override
    public boolean test(Dataset arg) throws IOException {
      return false;
    }
  }

  /** Predicate stub that always throws, to exercise error propagation. */
  static class ThrowsException implements CheckedExceptionPredicate<Dataset, IOException> {
    @Override
    public boolean test(Dataset arg) throws IOException {
      throw new IOException("Throwing a test exception");
    }
  }
}
| 2,300 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/dataset/TimePartitionedGlobFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
 * Tests for {@code TimePartitionGlobFinder}: discovery of time-partitioned dataset
 * directories on the local filesystem, derivation of partition glob patterns from a
 * time range, and validation of supported time formats.
 */
public class TimePartitionedGlobFinderTest {
  // Root directory (under the working dir) where test datasets are created; removed in cleanup().
  private Path testRootDir;
  private FileSystem localFs;
  private ZoneId zone;
  // Fixed "current" time (2019-01-01 00:00 America/Los_Angeles) so day offsets are deterministic.
  private ZonedDateTime curTime;

  @BeforeClass
  private void setup()
      throws IOException {
    localFs = FileSystem.getLocal(new Configuration());
    testRootDir = new Path(Paths.get("").toAbsolutePath().toString(),
        getClass().getSimpleName());
    // Start from a clean slate if a previous run left the directory behind.
    if (localFs.exists(testRootDir)) {
      localFs.delete(testRootDir, true);
    }
    localFs.mkdirs(testRootDir);
    localFs.deleteOnExit(testRootDir);
    zone = ZoneId.of("America/Los_Angeles");
    LocalDateTime localTime = LocalDateTime.of(2019,1,1,0,0);
    curTime = ZonedDateTime.of(localTime, zone);
  }

  /**
   * Finds DAY-granularity partitions over datasets laid out with hourly sub-paths,
   * first without and then with virtual (empty) partition creation enabled.
   */
  @Test
  public void testDayPartitions()
      throws IOException {
    String hourlyFormat = "yyyy/MM/dd/HH";
    String hourlyPrefix = "hourly/";
    String dayFormat = "yyyy/MM/dd";
    // create an empty dataset /db1/table1/hourly
    Path ds1 = createDatasetPath("db1/table1");
    // create dataset /db2/table2/hourly
    Path ds2 = createDatasetPath("db2/table2");
    // ds1 gets 2 hourly files for "today"; ds2 gets 2 hourly files for "yesterday".
    createPartitions(ds1, hourlyPrefix,0, 2, hourlyFormat);
    createPartitions(ds2, hourlyPrefix,-1, 2, hourlyFormat);
    String datasetPattern = new Path(testRootDir, "*/*").toString();
    // Test glob finder without creating empty partition
    Properties props = new Properties();
    props.setProperty("gobblin.dataset.pattern", datasetPattern);
    props.setProperty("timePartitionGlobFinder.partitionPrefix", hourlyPrefix);
    props.setProperty("timePartitionGlobFinder.timeFormat", dayFormat);
    props.setProperty("timePartitionGlobFinder.lookbackSpec", "P2D");
    props.setProperty("timePartitionGlobFinder.granularity", "DAY");
    TimePartitionGlobFinder finder = new TimePartitionGlobFinder(localFs, props, curTime);
    List<FileSystemDataset> datasets = finder.findDatasets();
    // Only partitions with actual data are discovered: one per dataset.
    Assert.assertEquals(datasets.size(), 2);
    // Verify there are 2 day partitions for /db2/table2
    Assert.assertNotNull(find(getPartitionPath(ds1, hourlyPrefix, 0, dayFormat), datasets));
    Assert.assertNotNull(find(getPartitionPath(ds2, hourlyPrefix, -1, dayFormat), datasets));
    // Test glob finder with creating empty partition
    props.setProperty("timePartitionGlobFinder.enableVirtualPartition", "true");
    finder = new TimePartitionGlobFinder(localFs, props, curTime);
    datasets = finder.findDatasets();
    // With virtual partitions, every day in the lookback window is represented: 3 days x 2 datasets.
    Assert.assertEquals(datasets.size(), 6);
    // Verify virtual partitions for /db1/table1
    Assert.assertTrue(find(getPartitionPath(ds1, hourlyPrefix, -1, dayFormat), datasets).isVirtual());
    Assert.assertTrue(find(getPartitionPath(ds1, hourlyPrefix, -2, dayFormat), datasets).isVirtual());
    // Verify virtual partitions for /db2/table2
    Assert.assertTrue(find(getPartitionPath(ds2, hourlyPrefix, 0, dayFormat), datasets).isVirtual());
    Assert.assertTrue(find(getPartitionPath(ds2, hourlyPrefix, -2, dayFormat), datasets).isVirtual());
  }

  /** Builds the expected partition path for {@code curTime + dayOffset} days, rendered with {@code format}. */
  private Path getPartitionPath(Path dataset, String prefix, int dayOffset, String format) {
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern(format);
    return new Path(dataset, prefix + formatter.format(curTime.plusDays(dayOffset)));
  }

  /** Returns the dataset in {@code list} rooted at {@code path}, or null if absent. */
  private SimpleFileSystemDataset find(Path path, List<FileSystemDataset> list) {
    for (FileSystemDataset dataset : list) {
      if (dataset.datasetRoot().equals(path)) {
        return (SimpleFileSystemDataset)dataset;
      }
    }
    return null;
  }

  /** Creates (and returns) an empty dataset directory under the test root. */
  private Path createDatasetPath(String dataset)
      throws IOException {
    Path datasetPath = new Path(testRootDir, dataset);
    localFs.mkdirs(datasetPath);
    return datasetPath;
  }

  /**
   * Creates {@code hours} hourly partition directories (hours 0..hours-1) for the day
   * {@code curTime + dayOffset}, each containing a single one-byte data file.
   */
  private void createPartitions(Path dataset, String prefix, int dayOffset, int hours, String format)
      throws IOException {
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern(format);
    ZonedDateTime dayTime = curTime.plusDays(dayOffset);
    for (int i = 0; i < hours; i++) {
      Path hourPath = new Path(formatter.format(dayTime.withHour(i)));
      Path datasetPartitionPath = new Path(dataset, prefix + hourPath);
      Path dataFile = new Path(datasetPartitionPath, "dataFile");
      try (OutputStream outputStream = localFs.create(dataFile, true)) {
        outputStream.write(i);
      }
    }
  }

  /**
   * Verifies glob-pattern derivation for a [start, end] range: year/month components are
   * enumerated in braces while finer components collapse to wildcards, for both
   * slash-separated and dash-separated time formats.
   */
  @Test
  public void testDerivePartitionPattern() {
    String slashTimeFormat = "yyyy/MM/dd";
    String dashTimeFormat = "yyyy-MM-dd";
    // 2019/12/1 - 2019/12/3
    LocalDateTime localTime = LocalDateTime.of(2019,12,3,0,0);
    ZonedDateTime end = ZonedDateTime.of(localTime, zone);
    ZonedDateTime start = end.withDayOfMonth(1);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2019}/{12}/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2019}-{12}*");
    // 2019/11/30 - 2019/12/3
    start = end.withMonth(11).withDayOfMonth(30);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2019}/{11,12}/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2019}-{11,12}*");
    // 2018/12/1 - 2019/12/3 — a multi-year range widens the month component to a wildcard.
    start = end.withYear(2018).withMonth(12).withDayOfMonth(1);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2018,2019}/*/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2018,2019}-*");
    // 2018/11/30 - 2019/12/3
    start = end.withYear(2018).withMonth(11).withDayOfMonth(30);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2018,2019}/*/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2018,2019}-*");
    // 2018/11/30 - 2019/01/3 — months spanning a year boundary are enumerated explicitly.
    end = end.withMonth(1);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2018,2019}/{11,12,01}/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2018,2019}-{11,12,01}*");
    // Test hourly
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, "yyyy/MM/dd/HH"),
        "{2018,2019}/{11,12,01}/*/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, "yyyy-MM-dd-HH"),
        "{2018,2019}-{11,12,01}*");
    // 2019/1/1 - 2019/1/3
    start = start.withYear(2019).withMonth(1).withDayOfMonth(1);
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, slashTimeFormat),
        "{2019}/{01}/*");
    Assert.assertEquals(TimePartitionGlobFinder.derivePartitionPattern(start, end, dashTimeFormat),
        "{2019}-{01}*");
  }

  /** Only year-first formats (slash- or dash-separated, optionally hourly) are supported. */
  @Test
  public void testSupportedTimeFormat() {
    Assert.assertTrue(TimePartitionGlobFinder.isTimeFormatSupported("yyyy/MM/dd/HH"));
    Assert.assertTrue(TimePartitionGlobFinder.isTimeFormatSupported("yyyy/MM/dd"));
    Assert.assertFalse(TimePartitionGlobFinder.isTimeFormatSupported("MM/dd/yyyy"));
    Assert.assertTrue(TimePartitionGlobFinder.isTimeFormatSupported("yyyy-MM-dd"));
    Assert.assertTrue(TimePartitionGlobFinder.isTimeFormatSupported("yyyy-MM-dd-HH"));
    Assert.assertFalse(TimePartitionGlobFinder.isTimeFormatSupported("MM-dd-yyyy"));
  }

  @AfterClass
  private void cleanup()
      throws IOException {
    // Best-effort removal of the on-disk test fixtures.
    if (localFs != null && localFs.exists(testRootDir)) {
      localFs.delete(testRootDir, true);
    }
  }
}
| 2,301 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/dataset/ManifestBasedDatasetFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.ManifestBasedDataset;
import org.apache.gobblin.data.management.copy.ManifestBasedDatasetFinder;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.util.commit.SetPermissionCommitStep;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import static org.mockito.Mockito.*;
/**
 * Tests for manifest-based distcp: dataset discovery via {@code ManifestBasedDatasetFinder}
 * and file-set resolution via {@code ManifestBasedDataset} over mocked filesystems.
 */
public class ManifestBasedDatasetFinderTest {
  private FileSystem localFs;
  // Scratch directory whose FileStatus is served for every source path by the mocked source FS.
  private File tmpDir;

  public ManifestBasedDatasetFinderTest() throws IOException {
    localFs = FileSystem.getLocal(new Configuration());
    tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
  }

  /** The finder should yield exactly one dataset for a single configured manifest location. */
  @Test
  public void testFindDataset() throws IOException {
    //Get manifest Path
    String manifestPath = getClass().getClassLoader().getResource("manifestBasedDistcpTest/sampleManifest.json").getPath();
    // Test manifestDatasetFinder
    Properties props = new Properties();
    props.setProperty("gobblin.copy.manifestBased.manifest.location", manifestPath);
    ManifestBasedDatasetFinder finder = new ManifestBasedDatasetFinder(localFs, props);
    List<ManifestBasedDataset> datasets = finder.findDatasets();
    Assert.assertEquals(datasets.size(), 1);
  }

  /**
   * Verifies that the manifest is read through the dedicated manifest filesystem (not the
   * source filesystem), that the resulting file set contains the expected copy entities plus
   * a permission-setting post-publish step, and that each mock is touched exactly the
   * expected number of times.
   */
  @Test
  public void testFindFiles() throws IOException, URISyntaxException {
    //Get manifest Path
    Path manifestPath = new Path(getClass().getClassLoader().getResource("manifestBasedDistcpTest/sampleManifest.json").getPath());
    // Test manifestDatasetFinder
    Properties props = new Properties();
    props.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/");
    // FileSystem is Closeable, so the mocks participate in try-with-resources.
    try (
        FileSystem sourceFs = Mockito.mock(FileSystem.class);
        FileSystem manifestReadFs = Mockito.mock(FileSystem.class);
        FileSystem destFs = Mockito.mock(FileSystem.class);
    ) {
      // Distinct URIs so the three filesystem roles cannot be confused with one another.
      URI SRC_FS_URI = new URI("source", "the.source.org", "/", null);
      URI MANIFEST_READ_FS_URI = new URI("manifest-read", "the.manifest-source.org", "/", null);
      URI DEST_FS_URI = new URI("dest", "the.dest.org", "/", null);
      Mockito.when(sourceFs.getUri()).thenReturn(SRC_FS_URI);
      Mockito.when(manifestReadFs.getUri()).thenReturn(MANIFEST_READ_FS_URI);
      Mockito.when(destFs.getUri()).thenReturn(DEST_FS_URI);
      // Every source path "exists" and reports the scratch dir's status; only the manifest FS
      // actually serves the real manifest file from the local filesystem.
      Mockito.when(sourceFs.getFileStatus(any(Path.class))).thenReturn(localFs.getFileStatus(new Path(tmpDir.toString())));
      Mockito.when(sourceFs.exists(any(Path.class))).thenReturn(true);
      Mockito.when(manifestReadFs.exists(any(Path.class))).thenReturn(true);
      Mockito.when(manifestReadFs.getFileStatus(manifestPath)).thenReturn(localFs.getFileStatus(manifestPath));
      Mockito.when(manifestReadFs.open(manifestPath)).thenReturn(localFs.open(manifestPath));
      // Nothing exists at the destination, so every manifest entry requires a copy.
      Mockito.when(destFs.exists(any(Path.class))).thenReturn(false);
      Mockito.doAnswer(invocation -> {
        Object[] args = invocation.getArguments();
        Path path = (Path)args[0];
        return localFs.makeQualified(path);
      }).when(sourceFs).makeQualified(any(Path.class));
      Iterator<FileSet<CopyEntity>> fileSets =
          new ManifestBasedDataset(sourceFs, manifestReadFs, manifestPath, props).getFileSetIterator(destFs, CopyConfiguration.builder(destFs, props).build());
      Assert.assertTrue(fileSets.hasNext());
      FileSet<CopyEntity> fileSet = fileSets.next();
      Assert.assertEquals(fileSet.getFiles().size(), 3); // 2 files to copy + 1 post publish step
      Assert.assertTrue(((PostPublishStep)fileSet.getFiles().get(2)).getStep() instanceof SetPermissionCommitStep);
      // The manifest must be accessed only through manifestReadFs, exactly once per operation.
      Mockito.verify(manifestReadFs, Mockito.times(1)).exists(manifestPath);
      Mockito.verify(manifestReadFs, Mockito.times(1)).getFileStatus(manifestPath);
      Mockito.verify(manifestReadFs, Mockito.times(1)).open(manifestPath);
      Mockito.verifyNoMoreInteractions(manifestReadFs);
      // One existence check per manifest entry against the source filesystem.
      Mockito.verify(sourceFs, Mockito.times(2)).exists(any(Path.class));
    }
  }
}
| 2,302 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/dataset/TestCopyManifest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import org.apache.gobblin.data.management.copy.CopyManifest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@code CopyManifest}: JSON round-tripping, streaming iteration, and
 * validation of malformed copyable units.
 */
public class TestCopyManifest {
  private final FileSystem localFs;

  public TestCopyManifest() throws IOException {
    this.localFs = FileSystem.getLocal(new Configuration());
  }

  /** Resolves a classpath test resource to its on-disk path. */
  private String resourcePath(String resource) {
    return getClass().getClassLoader().getResource(resource).getPath();
  }

  /** Reading the sample manifest yields its two units with the expected first file name. */
  @Test
  public void manifestSanityRead() throws IOException {
    Path samplePath = new Path(resourcePath("manifestBasedDistcpTest/sampleManifest.json"));
    CopyManifest manifest = CopyManifest.read(localFs, samplePath);
    Assert.assertEquals(manifest._copyableUnits.size(), 2);
    Assert.assertEquals(manifest._copyableUnits.get(0).fileName, "/tmp/dataset/test1.txt");
  }

  /** A manifest written to disk reads back with identical contents. */
  @Test
  public void manifestSanityWrite() throws IOException {
    File scratchDir = Files.createTempDir();
    Path outputPath = new Path(scratchDir.getAbsolutePath(), "test");
    CopyManifest written = new CopyManifest();
    written.add(new CopyManifest.CopyableUnit("testfilename", null, null, null));
    written.write(localFs, outputPath);
    CopyManifest roundTripped = CopyManifest.read(localFs, outputPath);
    Assert.assertEquals(roundTripped._copyableUnits.size(), 1);
    Assert.assertEquals(roundTripped._copyableUnits.get(0).fileName, "testfilename");
  }

  /** The streaming iterator visits the same units, in order, as an eager read. */
  @Test
  public void manifestSanityReadIterator() throws IOException {
    Path samplePath = new Path(resourcePath("manifestBasedDistcpTest/sampleManifest.json"));
    CopyManifest expected = CopyManifest.read(localFs, samplePath);
    CopyManifest.CopyableUnitIterator iterator = CopyManifest.getReadIterator(localFs, samplePath);
    int visited = 0;
    while (iterator.hasNext()) {
      CopyManifest.CopyableUnit unit = iterator.next();
      Assert.assertEquals(unit.fileName, expected._copyableUnits.get(visited).fileName);
      Assert.assertEquals(unit.fileGroup, expected._copyableUnits.get(visited).fileGroup);
      visited++;
    }
    Assert.assertEquals(visited, 2);
    Assert.assertEquals(visited, expected._copyableUnits.size());
    iterator.close();
  }

  /** A copyable unit without a file name is rejected at construction time. */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void invalidCopyableUnit() {
    new CopyManifest.CopyableUnit(null, null, null, null);
  }

  /** Iterating a manifest whose entries lack file names fails on next(). */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void invalidReadIteratorCopyManifest() throws IOException {
    Path badPath = new Path(resourcePath("manifestBasedDistcpTest/missingFileNameManifest.json"));
    CopyManifest.CopyableUnitIterator iterator = CopyManifest.getReadIterator(localFs, badPath);
    iterator.next();
  }
}
| 2,303 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/dataset/PathFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.dataset;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.util.filters.RegexPathFilter;
/** Tests reflective instantiation and behavior of {@link RegexPathFilter} via {@link DatasetUtils}. */
public class PathFilterTest {

  /**
   * Configures a regex filter that excludes dot-prefixed (hidden) paths and checks it
   * accepts normal paths while rejecting hidden ones.
   */
  @Test
  public void testRegexFilter() {
    Properties properties = new Properties();
    properties.setProperty(DatasetUtils.PATH_FILTER_KEY, RegexPathFilter.class.getName());
    // match everything that does not start with a dot
    properties.setProperty(DatasetUtils.CONFIGURATION_KEY_PREFIX + RegexPathFilter.REGEX, "^[^.].*");
    PathFilter filter = DatasetUtils.instantiatePathFilter(properties);
    Assert.assertFalse(filter.accept(new Path(".abc")));
    Assert.assertTrue(filter.accept(new Path("abc")));
    Assert.assertTrue(filter.accept(new Path("a.bc")));
  }
}
| 2,304 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/source/DatasetFinderSourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.source;
import java.io.IOException;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.PartitionableDataset;
import org.apache.gobblin.dataset.test.SimpleDatasetForTesting;
import org.apache.gobblin.dataset.test.SimpleDatasetPartitionForTesting;
import org.apache.gobblin.dataset.test.SimplePartitionableDatasetForTesting;
import org.apache.gobblin.dataset.test.StaticDatasetsFinderForTesting;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
/**
 * Tests for {@code DatasetFinderSource}: work-unit generation with and without
 * drilling down into dataset partitions, for both the list and stream APIs.
 */
public class DatasetFinderSourceTest {
  public static final String DATASET_URN = "test.datasetUrn";
  public static final String PARTITION_URN = "test.partitionUrn";

  /**
   * Builds a finder over three datasets: two plain ones surrounding a partitionable
   * "dataset2" that carries partitions "p1" and "p2".
   */
  private static IterableDatasetFinder newTestFinder() {
    Dataset plainFirst = new SimpleDatasetForTesting("dataset1");
    Dataset partitioned = new SimplePartitionableDatasetForTesting("dataset2",
        Lists.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2")));
    Dataset plainLast = new SimpleDatasetForTesting("dataset3");
    return new StaticDatasetsFinderForTesting(Lists.newArrayList(plainFirst, partitioned, plainLast));
  }

  /** Without drill-down, each dataset produces exactly one work unit with no partition urn. */
  @Test
  public void testNonDrilledDown() {
    MySource source = new MySource(false, newTestFinder());
    List<WorkUnit> workUnits = source.getWorkunits(new SourceState());
    Assert.assertEquals(workUnits.size(), 3);
    Assert.assertEquals(workUnits.get(0).getProp(DATASET_URN), "dataset1");
    Assert.assertNull(workUnits.get(0).getProp(PARTITION_URN));
    Assert.assertEquals(workUnits.get(1).getProp(DATASET_URN), "dataset2");
    Assert.assertNull(workUnits.get(1).getProp(PARTITION_URN));
    Assert.assertEquals(workUnits.get(2).getProp(DATASET_URN), "dataset3");
    Assert.assertNull(workUnits.get(2).getProp(PARTITION_URN));
    // The streaming API must yield the same work units in the same order.
    WorkUnitStream stream = source.getWorkunitStream(new SourceState());
    Assert.assertEquals(Lists.newArrayList(stream.getWorkUnits()), workUnits);
  }

  /** With drill-down, partitionable datasets expand into one work unit per partition. */
  @Test
  public void testDrilledDown() {
    MySource source = new MySource(true, newTestFinder());
    List<WorkUnit> workUnits = source.getWorkunits(new SourceState());
    Assert.assertEquals(workUnits.size(), 4);
    Assert.assertEquals(workUnits.get(0).getProp(DATASET_URN), "dataset1");
    Assert.assertNull(workUnits.get(0).getProp(PARTITION_URN));
    Assert.assertEquals(workUnits.get(1).getProp(DATASET_URN), "dataset2");
    Assert.assertEquals(workUnits.get(1).getProp(PARTITION_URN), "p1");
    Assert.assertEquals(workUnits.get(2).getProp(DATASET_URN), "dataset2");
    Assert.assertEquals(workUnits.get(2).getProp(PARTITION_URN), "p2");
    Assert.assertEquals(workUnits.get(3).getProp(DATASET_URN), "dataset3");
    Assert.assertNull(workUnits.get(3).getProp(PARTITION_URN));
    // The streaming API must yield the same work units in the same order.
    WorkUnitStream stream = source.getWorkunitStream(new SourceState());
    Assert.assertEquals(Lists.newArrayList(stream.getWorkUnits()), workUnits);
  }

  /** Minimal concrete {@code DatasetFinderSource} that records urns into work-unit properties. */
  public static class MySource extends DatasetFinderSource<String, String> {
    private final IterableDatasetFinder datasetsFinder;

    public MySource(boolean drilldownIntoPartitions, IterableDatasetFinder datasetsFinder) {
      super(drilldownIntoPartitions);
      this.datasetsFinder = datasetsFinder;
    }

    @Override
    public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
      // Extraction is irrelevant to these tests.
      return null;
    }

    @Override
    protected WorkUnit workUnitForDataset(Dataset dataset) {
      WorkUnit unit = new WorkUnit();
      unit.setProp(DATASET_URN, dataset.getUrn());
      return unit;
    }

    @Override
    protected WorkUnit workUnitForDatasetPartition(PartitionableDataset.DatasetPartition partition) {
      WorkUnit unit = new WorkUnit();
      unit.setProp(DATASET_URN, partition.getDataset().getUrn());
      unit.setProp(PARTITION_URN, partition.getUrn());
      return unit;
    }

    @Override
    public void shutdown(SourceState state) {
      // Nothing to clean up.
    }

    @Override
    protected IterableDatasetFinder createDatasetsFinder(SourceState state) throws IOException {
      return this.datasetsFinder;
    }
  }
}
| 2,305 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/source/LoopingDatasetFinderSourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.source;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.PartitionableDataset;
import org.apache.gobblin.dataset.test.SimpleDatasetForTesting;
import org.apache.gobblin.dataset.test.SimpleDatasetPartitionForTesting;
import org.apache.gobblin.dataset.test.SimplePartitionableDatasetForTesting;
import org.apache.gobblin.dataset.test.StaticDatasetsFinderForTesting;
import org.apache.gobblin.runtime.FsDatasetStateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class LoopingDatasetFinderSourceTest {
private static final String TEST_JOB_NAME_1 = "TestJob1";
private static final String TEST_JOB_NAME_2 = "TestJob2";
private static final String TEST_JOB_ID = "TestJob11";
private static final String TEST_TASK_ID_PREFIX = "TestTask-";
private static final String TEST_STATE_STORE_ROOT_DIR = "/tmp/LoopingSourceTest";
private FsDatasetStateStore fsDatasetStateStore;
private long startTime = System.currentTimeMillis();
/**
 * Creates the local-filesystem dataset state store used by the *DatasetState tests and
 * deletes any state persisted under either test job name so each run starts clean.
 */
@BeforeClass
public void setUp()
throws IOException {
this.fsDatasetStateStore = new FsDatasetStateStore(ConfigurationKeys.LOCAL_FS_URI, TEST_STATE_STORE_ROOT_DIR);
// clear data that may have been left behind by a prior test run
this.fsDatasetStateStore.delete(TEST_JOB_NAME_1);
this.fsDatasetStateStore.delete(TEST_JOB_NAME_2);
}
/**
 * Without drilldown, the source emits one work unit per dataset (partitioned or not),
 * resumes from the previous run's checkpoint, and wraps around after exhausting the list.
 */
@Test
public void testNonDrilldown() {
  // dataset2 is partitioned, but its partitions must not be expanded in this mode.
  Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
  Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2",
      Lists.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2")));
  Dataset dataset3 = new SimpleDatasetForTesting("dataset3");
  Dataset dataset4 = new SimpleDatasetForTesting("dataset4");
  Dataset dataset5 = new SimpleDatasetForTesting("dataset5");
  IterableDatasetFinder finder =
      new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset5, dataset4, dataset3, dataset2, dataset1));
  MySource source = new MySource(false, finder);
  SourceState sourceState = new SourceState();
  sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);

  // First run: three dataset work units plus the trailing global-watermark work unit.
  List<WorkUnit> units = Lists.newArrayList(source.getWorkunitStream(sourceState).getWorkUnits());
  Assert.assertEquals(units.size(), 4);
  verifyWorkUnitState(units, "dataset3", null, false, false);

  // Second run resumes after dataset3 and drains what is left.
  SourceState spiedState = Mockito.spy(sourceState);
  Mockito.doReturn(units.stream().map(WorkUnitState::new).collect(Collectors.toList()))
      .when(spiedState).getPreviousWorkUnitStates();
  units = Lists.newArrayList(source.getWorkunitStream(spiedState).getWorkUnits());
  Assert.assertEquals(units.size(), 3);
  verifyWorkUnitState(units, "dataset5", null, true, false);

  // Third run loops back around to the beginning of the dataset list.
  Mockito.doReturn(units.stream().map(WorkUnitState::new).collect(Collectors.toList()))
      .when(spiedState).getPreviousWorkUnitStates();
  units = Lists.newArrayList(source.getWorkunitStream(spiedState).getWorkUnits());
  Assert.assertEquals(units.size(), 4);
  verifyWorkUnitState(units, "dataset3", null, false, false);
}
/**
 * With drilldown, each partition of a partitionable dataset becomes its own work unit;
 * the source checkpoints at dataset+partition granularity and wraps around at the end.
 */
@Test
public void testDrilldown() {
  // Three datasets; dataset2 and dataset3 each expose partitions p1..p3.
  Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
  Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2", Lists.newArrayList(
      new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
      new SimpleDatasetPartitionForTesting("p3")));
  Dataset dataset3 = new SimplePartitionableDatasetForTesting("dataset3", Lists.newArrayList(
      new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
      new SimpleDatasetPartitionForTesting("p3")));
  IterableDatasetFinder finder = new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset3, dataset2, dataset1));
  MySource source = new MySource(true, finder);
  // Cap each run at 3 work units.
  SourceState sourceState = new SourceState();
  sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);

  // Run 1: dataset1, dataset2@p1, dataset2@p2, plus the global-watermark work unit.
  List<WorkUnit> units = Lists.newArrayList(source.getWorkunitStream(sourceState).getWorkUnits());
  Assert.assertEquals(units.size(), 4);
  verifyWorkUnitState(units, "dataset2", "p2", false, false);

  // Run 2 picks up at dataset2@p3.
  SourceState spiedState = Mockito.spy(sourceState);
  Mockito.doReturn(units.stream().map(WorkUnitState::new).collect(Collectors.toList()))
      .when(spiedState).getPreviousWorkUnitStates();
  units = Lists.newArrayList(source.getWorkunitStream(spiedState).getWorkUnits());
  Assert.assertEquals(units.size(), 4);
  verifyWorkUnitState(units, "dataset3", "p2", false, false);

  // Run 3 drains the final partition and flags end-of-datasets.
  Mockito.doReturn(units.stream().map(WorkUnitState::new).collect(Collectors.toList()))
      .when(spiedState).getPreviousWorkUnitStates();
  units = Lists.newArrayList(source.getWorkunitStream(spiedState).getWorkUnits());
  Assert.assertEquals(units.size(), 2);
  verifyWorkUnitState(units, "dataset3", "p3", true, false);

  // Run 4 loops back around to the start.
  Mockito.doReturn(units.stream().map(WorkUnitState::new).collect(Collectors.toList()))
      .when(spiedState).getPreviousWorkUnitStates();
  units = Lists.newArrayList(source.getWorkunitStream(spiedState).getWorkUnits());
  Assert.assertEquals(units.size(), 4);
  verifyWorkUnitState(units, "dataset2", "p2", false, false);
}
/**
 * Non-drilldown looping with a dataset state store: each run's work units carry a low
 * watermark equal to the previously committed high watermark for that dataset (zero on
 * the first pass), and the trailing global-watermark work unit marks the resume point.
 */
@Test
public void testNonDrilldownDatasetState()
throws IOException {
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2",
Lists.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2")));
Dataset dataset3 = new SimpleDatasetForTesting("dataset3");
Dataset dataset4 = new SimpleDatasetForTesting("dataset4");
Dataset dataset5 = new SimpleDatasetForTesting("dataset5");
IterableDatasetFinder finder =
new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset5, dataset4, dataset3, dataset2, dataset1));
MySource mySource = new MySource(false, finder, fsDatasetStateStore, TEST_JOB_NAME_1);
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
sourceState.setProp(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, TEST_STATE_STORE_ROOT_DIR);
sourceState.setProp(ConfigurationKeys.JOB_NAME_KEY, TEST_JOB_NAME_1);
// First run: no prior state, so every low watermark is zero.
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState, true);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
// Collect each work unit's expected high watermark so it can be persisted as the
// committed state for the next run.
List<LongWatermark> watermarks1 = new ArrayList<>();
List<Dataset> datasets1 = new ArrayList<>();
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset1);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset2);
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset3);
// The final work unit is the global-watermark placeholder, not a real dataset.
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY),
ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Dataset globalWmDataset = new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets1.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3", null, false, true);
// Commit this run's state so the next run reads it back.
persistDatasetState(datasets1, watermarks1, TEST_JOB_NAME_1);
testDatasetStates(datasets1, watermarks1, TEST_JOB_NAME_1);
// Second run should continue where it left off
List<LongWatermark> watermarks2 = new ArrayList<>();
List<Dataset> datasets2 = new ArrayList<>();
int workUnitSize = workUnits.size();
// Only the global-watermark work unit (the last one) is fed back as previous state.
List<WorkUnitState> workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 3);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset4");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(dataset4);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset5");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(dataset5);
// Dataset list exhausted: the placeholder also signals end-of-datasets.
Assert.assertTrue(workUnits.get(2).getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets2.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset5",null,true, true);
persistDatasetState(datasets2, watermarks2, TEST_JOB_NAME_1);
testDatasetStates(datasets2, watermarks2, TEST_JOB_NAME_1);
// Loop around
List<LongWatermark> watermarks3 = new ArrayList<>();
List<Dataset> datasets3 = new ArrayList<>();
workUnitSize = workUnits.size();
workUnitStates = workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
// Third run revisits the first three datasets; low watermarks now equal the high
// watermarks committed after the first run.
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(0).getValue());
watermarks3.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset1);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(1).getValue());
watermarks3.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset2);
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(2).getValue());
watermarks3.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset3);
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets3.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3",null,false, true);
persistDatasetState(datasets3, watermarks3, TEST_JOB_NAME_1);
testDatasetStates(datasets3, watermarks3, TEST_JOB_NAME_1);
}
/**
 * Drilldown looping with a dataset state store: state is tracked per partition under the
 * urn {@code dataset@partition}; the global-watermark work unit records both the dataset
 * and partition urns of the resume point, and watermarks survive the wrap-around.
 */
@Test
public void testDrilldownDatasetState()
throws IOException {
// Create three datasets, two of them partitioned
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
Dataset dataset3 = new SimplePartitionableDatasetForTesting("dataset3", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
IterableDatasetFinder finder = new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset3, dataset2, dataset1));
MySource mySource = new MySource(true, finder, fsDatasetStateStore, TEST_JOB_NAME_2);
// Limit to 3 wunits per run
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
sourceState.setProp(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, TEST_STATE_STORE_ROOT_DIR);
sourceState.setProp(ConfigurationKeys.JOB_NAME_KEY, TEST_JOB_NAME_2);
// first run, get three first work units
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState,true);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
// Track the expected high watermark per state-store urn for persistence below.
List<LongWatermark> watermarks1 = new ArrayList<>();
List<Dataset> datasets1 = new ArrayList<>();
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset1);
// Partition state is stored under the combined urn "dataset@partition".
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(new SimpleDatasetForTesting("dataset2@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(new SimpleDatasetForTesting("dataset2@p2"));
// Placeholder work unit records the dataset and partition of the resume point.
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset2");
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p2");
Dataset globalWmDataset = new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets1.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset2","p2",false, true);
persistDatasetState(datasets1, watermarks1, TEST_JOB_NAME_2);
testDatasetStates(datasets1, watermarks1, TEST_JOB_NAME_2);
// Second run should continue where it left off
int workUnitSize = workUnits.size();
// Only the trailing global-watermark work unit is handed back as previous state.
List<WorkUnitState> workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
List<LongWatermark> watermarks2 = new ArrayList<>();
List<Dataset> datasets2 = new ArrayList<>();
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits())
</head>

<source_numbered>
…
</source_numbered>

(Note: the transcription above is the grader's full view; edit bodies below L945 of this block continue verbatim.)
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p3");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset2@p3"));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset3@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset3@p2"));
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset3");
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p2");
datasets2.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3","p2",false, true);
persistDatasetState(datasets2, watermarks2, TEST_JOB_NAME_2);
testDatasetStates(datasets2, watermarks2, TEST_JOB_NAME_2);
// third run, continue from where it left off
workUnitSize = workUnits.size();
workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
List<LongWatermark> watermarks3 = new ArrayList<>();
List<Dataset> datasets3 = new ArrayList<>();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 2);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p3");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks3.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(new SimpleDatasetForTesting("dataset3@p3"));
// Final partition consumed: the placeholder flags end-of-datasets.
Assert.assertTrue(workUnits.get(1).getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(1).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset3");
Assert.assertEquals(workUnits.get(1).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p3");
datasets3.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3","p3",true, true);
persistDatasetState(datasets3, watermarks3, TEST_JOB_NAME_2);
testDatasetStates(datasets3, watermarks3, TEST_JOB_NAME_2);
// fourth run, finished all work units, loop around
workUnitSize = workUnits.size();
workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
List<LongWatermark> watermarks4 = new ArrayList<>();
List<Dataset> datasets4 = new ArrayList<>();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
// Wrap-around: low watermarks equal the high watermarks committed after the first run.
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(0).getValue());
watermarks4.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset1"));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(1).getValue());
watermarks4.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset2@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(2).getValue());
watermarks4.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset2@p2"));
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets4.add(new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN));
verifyWorkUnitState(workUnits,"dataset2","p2",false,true);
persistDatasetState(datasets4, watermarks4, TEST_JOB_NAME_2);
testDatasetStates(datasets4, watermarks4, TEST_JOB_NAME_2);
}
/**
 * Asserts that only the final work unit carries the looping-source bookkeeping (dataset urn,
 * optional partition urn, global-watermark flag, and end-of-datasets flag when expected),
 * and that none of the preceding work units carry any of those properties.
 */
public void verifyWorkUnitState(List<WorkUnit> workUnits, String datasetUrn, String partitionUrn,
    boolean endOfDatasets, boolean isDatasetStateStoreEnabled) {
  int last = workUnits.size() - 1;
  // Every work unit except the last must be free of looping-source bookkeeping.
  for (int i = 0; i < last; i++) {
    WorkUnit unit = workUnits.get(i);
    Assert.assertNull(unit.getProp(LoopingDatasetFinderSource.DATASET_URN));
    Assert.assertNull(unit.getProp(LoopingDatasetFinderSource.PARTITION_URN));
    if (!isDatasetStateStoreEnabled) {
      Assert.assertNull(unit.getProp(ConfigurationKeys.DATASET_URN_KEY));
    }
    Assert.assertNull(unit.getProp(LoopingDatasetFinderSource.GLOBAL_WATERMARK_DATASET_KEY));
    Assert.assertNull(unit.getProp(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
  }
  // The last work unit is the global-watermark placeholder carrying the resume point.
  WorkUnit lastUnit = workUnits.get(last);
  Assert.assertEquals(lastUnit.getProp(LoopingDatasetFinderSource.DATASET_URN), datasetUrn);
  if (partitionUrn != null) {
    Assert.assertEquals(lastUnit.getProp(LoopingDatasetFinderSource.PARTITION_URN), partitionUrn);
  } else {
    Assert.assertNull(lastUnit.getProp(LoopingDatasetFinderSource.PARTITION_URN));
  }
  if (endOfDatasets) {
    Assert.assertTrue(lastUnit.getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
  } else {
    Assert.assertNull(lastUnit.getProp(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
  }
  Assert.assertEquals(lastUnit.getPropAsBoolean(LoopingDatasetFinderSource.GLOBAL_WATERMARK_DATASET_KEY), true);
}
/**
 * Simulates a committed run by persisting a COMMITTED {@link JobState.DatasetState} with a
 * single committed task for every dataset urn, recording the corresponding watermark as the
 * task's actual high watermark.
 *
 * @param datasets datasets to persist state for; callers append the global-watermark
 *        placeholder dataset as the final entry
 * @param watermarks actual high watermarks, one per dataset except the final placeholder
 * @param jobName job name under which the state is stored
 * @throws IOException if the state store write fails
 */
public void persistDatasetState(List<Dataset> datasets, List<LongWatermark> watermarks, String jobName)
throws IOException {
Preconditions.checkArgument(datasets.size() >= 2);
for (int i = 0; i < datasets.size(); i++) {
String datasetUrn = datasets.get(i).getUrn();
JobState.DatasetState datasetState = new JobState.DatasetState(jobName, TEST_JOB_ID);
datasetState.setDatasetUrn(datasetUrn);
datasetState.setState(JobState.RunningState.COMMITTED);
datasetState.setId(datasetUrn);
datasetState.setStartTime(this.startTime);
datasetState.setEndTime(this.startTime + 1000);
datasetState.setDuration(1000);
TaskState taskState = new TaskState();
taskState.setJobId(TEST_JOB_ID);
taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
taskState.setId(TEST_TASK_ID_PREFIX + i);
taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
// The last entry is the global-watermark placeholder and carries no watermark of its own.
if (i < datasets.size() - 1) {
taskState.setActualHighWatermark(watermarks.get(i));
}
datasetState.addTaskState(taskState);
this.fsDatasetStateStore.persistDatasetState(datasetUrn, datasetState);
}
}
/**
 * Reads back the latest persisted state for every dataset urn and asserts it matches what
 * {@link #persistDatasetState} wrote: COMMITTED job and task state, the fixed timing fields,
 * and (for all but the final global-watermark placeholder) the expected actual high watermark.
 *
 * @throws IOException if the state store read fails
 */
private void testDatasetStates(List<Dataset> datasets, List<LongWatermark> watermarks, String jobName)
throws IOException {
Preconditions.checkArgument(datasets.size() >= 2);
for (int i = 0; i < datasets.size(); i++) {
JobState.DatasetState datasetState =
this.fsDatasetStateStore.getLatestDatasetState(jobName, datasets.get(i).getUrn());
Assert.assertEquals(datasetState.getDatasetUrn(), datasets.get(i).getUrn());
Assert.assertEquals(datasetState.getJobName(), jobName);
Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);
Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
Assert.assertEquals(datasetState.getStartTime(), this.startTime);
Assert.assertEquals(datasetState.getEndTime(), this.startTime + 1000);
Assert.assertEquals(datasetState.getDuration(), 1000);
Assert.assertEquals(datasetState.getCompletedTasks(), 1);
TaskState taskState = datasetState.getTaskStates().get(0);
Assert.assertEquals(taskState.getJobId(), TEST_JOB_ID);
Assert.assertEquals(taskState.getTaskId(), TEST_TASK_ID_PREFIX + i);
Assert.assertEquals(taskState.getId(), TEST_TASK_ID_PREFIX + i);
Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
// The final placeholder dataset has no watermark to compare.
if (i < datasets.size() - 1) {
Assert.assertEquals(taskState.getActualHighWatermark(LongWatermark.class).getValue(),
watermarks.get(i).getValue());
}
}
}
/**
 * Test source wired to a fixed {@link IterableDatasetFinder}. When constructed with a state
 * store, each work unit's low watermark is the last committed actual high watermark for the
 * dataset (or {@code dataset@partition}) urn — zero when no state exists — and its expected
 * high watermark is a timestamp fixed at construction time.
 */
public static class MySource extends LoopingDatasetFinderSource<String, String> {
  private final IterableDatasetFinder datasetsFinder;
  private final boolean isDatasetStateStoreEnabled;
  private DatasetStateStore fsDatasetStateStore;
  private String jobName;
  // Expected high watermark stamped on every work unit; fixed once so runs are comparable.
  private final long lastProcessedTs = System.currentTimeMillis();

  MySource(boolean drilldownIntoPartitions, IterableDatasetFinder datasetsFinder) {
    super(drilldownIntoPartitions);
    this.datasetsFinder = datasetsFinder;
    this.isDatasetStateStoreEnabled = false;
  }

  MySource(boolean drilldownIntoPartitions, IterableDatasetFinder datasetsFinder,
      FsDatasetStateStore fsDatasetStateStore, String jobName) {
    super(drilldownIntoPartitions);
    this.datasetsFinder = datasetsFinder;
    this.isDatasetStateStoreEnabled = true;
    this.fsDatasetStateStore = fsDatasetStateStore;
    this.jobName = jobName;
  }

  /** No extraction happens in these tests. */
  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state)
      throws IOException {
    return null;
  }

  @Override
  protected WorkUnit workUnitForDataset(Dataset dataset) {
    if (!this.isDatasetStateStoreEnabled) {
      return new WorkUnit();
    }
    return workUnitWithWatermarks(dataset.getUrn());
  }

  @Override
  protected WorkUnit workUnitForDatasetPartition(PartitionableDataset.DatasetPartition partition) {
    if (!this.isDatasetStateStoreEnabled) {
      return new WorkUnit();
    }
    // Partition state is keyed under the combined "dataset@partition" urn.
    return workUnitWithWatermarks(partition.getDataset().getUrn() + "@" + partition.getUrn());
  }

  /**
   * Builds a work unit whose low watermark is the last committed actual high watermark for
   * {@code datasetUrn} (zero if no prior state) and whose expected high watermark is the
   * fixed timestamp. Extracted to remove the duplication between the dataset and
   * dataset-partition overrides.
   */
  private WorkUnit workUnitWithWatermarks(String datasetUrn) {
    JobState.DatasetState datasetState;
    try {
      datasetState =
          (JobState.DatasetState) this.fsDatasetStateStore.getLatestDatasetState(this.jobName, datasetUrn);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    LongWatermark previousWatermark = (datasetState == null) ? new LongWatermark(0)
        : datasetState.getTaskStatesAsWorkUnitStates().get(0).getActualHighWatermark(LongWatermark.class);
    WorkUnit workUnit = new WorkUnit();
    workUnit.setWatermarkInterval(new WatermarkInterval(previousWatermark, new LongWatermark(this.lastProcessedTs)));
    return workUnit;
  }

  /** Nothing to clean up. */
  @Override
  public void shutdown(SourceState state) {
  }

  /** Returns the finder supplied at construction. */
  @Override
  protected IterableDatasetFinder createDatasetsFinder(SourceState state)
      throws IOException {
    return this.datasetsFinder;
  }
}
/** Removes the on-disk state store directory created by the tests. */
@AfterClass
public void tearDown()
    throws IOException {
  FileSystem fs = FileSystem.getLocal(new Configuration(false));
  Path stateStoreRoot = new Path(TEST_STATE_STORE_ROOT_DIR);
  if (fs.exists(stateStoreRoot)) {
    fs.delete(stateStoreRoot, true);
  }
}
}
| 2,306 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/TestTrashTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
/** Unit tests for {@link TestTrash}. */
public class TestTrashTest {

  /**
   * Moves two paths to trash and verifies both delete operations are recorded: plain
   * {@code moveToTrash} records no user, {@code moveToTrashAsOwner} records the owner
   * passed at construction.
   */
  @Test
  public void test() throws IOException {
    FileSystem fs = mock(FileSystem.class);
    TestTrash trash = new TestTrash(fs, new Properties(), "user");
    Path path1 = new Path("/some/path");
    Path path2 = new Path("/some/other/path");

    Assert.assertTrue(trash.moveToTrash(path1));
    Assert.assertTrue(trash.moveToTrashAsOwner(path2));

    // assertEquals (instead of assertTrue(a.equals(b))) gives useful failure messages;
    // the debug println previously here added noise to test output.
    Assert.assertEquals(trash.getDeleteOperations().size(), 2);
    Assert.assertEquals(trash.getDeleteOperations().get(0).getPath(), path1);
    Assert.assertNull(trash.getDeleteOperations().get(0).getUser());
    Assert.assertEquals(trash.getDeleteOperations().get(1).getPath(), path2);
    Assert.assertEquals(trash.getDeleteOperations().get(1).getUser(), "user");
  }

  /**
   * Verifies simulated-delay behavior: a move must not complete until the configured number
   * of ticks (3 here) has elapsed. Kept disabled, as in the original, because it relies on
   * timing-sensitive cross-thread coordination.
   */
  @Test(enabled = false)
  public void testDelay() throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(5);
    try {
      FileSystem fs = mock(FileSystem.class);
      Properties properties = new Properties();
      TestTrash.simulateDelay(properties, 3);
      final TestTrash trash = new TestTrash(fs, properties, "user");
      final Path path1 = new Path("/some/path");

      Future<Boolean> future1 = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          return trash.moveToTrash(path1);
        }
      });

      // Spin (yielding, rather than burning a core) until trash has received the operation.
      while (trash.getOperationsReceived() < 1) {
        Thread.yield();
      }

      // Each tick releases one unit of simulated delay; the move must not finish early.
      Assert.assertFalse(future1.isDone());
      Assert.assertEquals(trash.getDeleteOperations().size(), 0);
      trash.tick();
      Assert.assertFalse(future1.isDone());
      Assert.assertEquals(trash.getDeleteOperations().size(), 0);
      trash.tick();
      Assert.assertFalse(future1.isDone());
      Assert.assertEquals(trash.getDeleteOperations().size(), 0);
      trash.tick();

      Assert.assertEquals(trash.getDeleteOperations().size(), 1);
      Assert.assertTrue(future1.get());
      Assert.assertNull(trash.getDeleteOperations().get(0).getUser());
      Assert.assertEquals(trash.getDeleteOperations().get(0).getPath(), path1);
    } finally {
      // The executor was previously never shut down, leaking its worker threads.
      executorService.shutdownNow();
    }
  }
}
| 2,307 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/TrashTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static org.mockito.Mockito.*;
/**
 * Shared fixture for {@link Trash} tests: builds a Mockito-mocked {@link FileSystem}
 * whose trash directory existence, identifier file, and listing contents are configurable,
 * then constructs a {@link Trash} on top of it.
 */
public class TrashTestBase {

  public static final String TEST_TRASH_LOCATION = "test.trash.location";

  FileSystem fs;
  Trash trash;

  public TrashTestBase(Properties properties) throws IOException {
    // Default fixture: trash directory and identifier file exist, trash is non-empty.
    this(properties, true, true, false);
  }

  public TrashTestBase(Properties properties, boolean trashExists, boolean trashIdentifierExists, boolean trashEmpty)
      throws IOException {
    this.fs = mock(FileSystem.class);

    Path homeDir = new Path("/home/directory");
    // An explicit TEST_TRASH_LOCATION property overrides the default under the home directory.
    Path trashDir = properties.containsKey(TEST_TRASH_LOCATION)
        ? new Path(properties.getProperty(TEST_TRASH_LOCATION))
        : new Path(homeDir, Trash.DEFAULT_TRASH_DIRECTORY);
    Path identifierFile = new Path(trashDir, Trash.TRASH_IDENTIFIER_FILE);

    when(this.fs.getHomeDirectory()).thenReturn(homeDir);
    when(this.fs.exists(trashDir)).thenReturn(trashExists);
    when(this.fs.exists(identifierFile)).thenReturn(trashIdentifierExists);

    // Listing is either empty or contains a single placeholder entry, per the flag.
    FileStatus[] listing = trashEmpty
        ? new FileStatus[]{}
        : new FileStatus[]{new FileStatus(0, true, 0, 0, 0, new Path("file"))};
    when(this.fs.listStatus(trashDir)).thenReturn(listing);

    when(this.fs.isDirectory(trashDir)).thenReturn(true);
    when(this.fs.mkdirs(any(Path.class))).thenReturn(true);
    when(this.fs.mkdirs(any(Path.class), any(FsPermission.class))).thenReturn(true);
    when(this.fs.createNewFile(any(Path.class))).thenReturn(true);
    when(this.fs.makeQualified(any(Path.class))).thenAnswer(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation) throws Throwable {
        // Identity answer: return the input path instead of qualifying it against a scheme.
        return (Path) invocation.getArguments()[0];
      }
    });

    this.trash = new Trash(this.fs, properties);
  }
}
| 2,308 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/AsyncTrashTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class AsyncTrashTest {

  /**
   * Verifies that {@link AsyncTrash} forwards moveToTrash / moveToTrashAsUser calls
   * asynchronously to its decorated {@link TestTrash}, and that the queued operations
   * complete only as the simulated delay is ticked forward.
   */
  @Test public void testAsyncTrash() throws Exception {
    Properties properties = TestTrash.propertiesForTestTrash();
    TestTrash.simulateDelay(properties, 2);
    FileSystem fs = mock(FileSystem.class);

    AsyncTrash trash = new AsyncTrash(fs, properties);
    // All ten operations below flow through the same underlying TestTrash.
    TestTrash underlyingTrash = (TestTrash) trash.getDecoratedObject();

    for (int i = 0; i < 5; i++) {
      trash.moveToTrash(new Path("file" + i));
      Thread.sleep(10);
    }
    for (int i = 0; i < 5; i++) {
      trash.moveToTrashAsUser(new Path("file" + i), "user" + i);
      Thread.sleep(10);
    }

    // Bounded wait until all ten operations have been received and are pending.
    int maxWaits = 5;
    while (maxWaits > 0 && underlyingTrash.getOperationsWaiting() < 10) {
      Thread.sleep(100);
      maxWaits--;
    }

    // With a simulated delay of 2, nothing completes before the second tick.
    Assert.assertTrue(underlyingTrash.getDeleteOperations().isEmpty());
    underlyingTrash.tick();
    Assert.assertTrue(underlyingTrash.getDeleteOperations().isEmpty());
    underlyingTrash.tick();
    // There is a race condition in the ScalingThreadPoolExecutor that can cause fewer threads than calls even before
    // reaching the max number of threads. This is somewhat rare, and if # calls > max threads, the effect is
    // essentially gone, so there is no issue for the application. However, the test would be sensitive to this issue,
    // so we check that at least 8 delete operations were scheduled, giving a tolerance of 2 unscheduled threads.
    // (These threads would be resolved after a few more ticks).
    // NOTE: ">= 8" (not "> 8") is what "at least 8 / tolerance of 2" means for 10 operations.
    Assert.assertTrue(underlyingTrash.getDeleteOperations().size() >= 8);
    underlyingTrash.tick();
    underlyingTrash.tick();
    underlyingTrash.tick();
    underlyingTrash.tick();
    // Now we should see all 10 delete operations.
    Assert.assertEquals(underlyingTrash.getDeleteOperations().size(), 10);
  }
}
| 2,309 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/TestCleanupPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
/**
 * {@link SnapshotCleanupPolicy} used in tests: approves deletion of exactly one snapshot
 * (the first one it is asked about) and preserves every snapshot after that.
 */
public class TestCleanupPolicy implements SnapshotCleanupPolicy {
  // Tracks whether this policy instance has already approved a deletion.
  boolean deletedOne = false;

  public TestCleanupPolicy(Properties properties) {
    // Properties are unused; the constructor signature matches the reflective-instantiation contract.
  }

  @Override
  public boolean shouldDeleteSnapshot(FileStatus snapshot, Trash trash) {
    if (!this.deletedOne) {
      // First snapshot offered: approve its deletion and remember that we did.
      this.deletedOne = true;
      return true;
    }
    return false;
  }
}
| 2,310 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/TrashFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class TrashFactoryTest {

  /**
   * Verifies that {@link TrashFactory} returns the expected {@link Trash} implementation
   * for each combination of factory switches (default, simulate, test, skip-trash).
   */
  @Test
  public void test() throws IOException {
    Path homeDirectory = new Path("/home/directory");
    Path trashDirectory = new Path(homeDirectory, Trash.DEFAULT_TRASH_DIRECTORY);
    FileSystem fs = mockFileSystem(homeDirectory, trashDirectory);

    Properties properties;

    // Default: real trash implementations.
    properties = getBaseProperties(trashDirectory);
    Assert.assertTrue(TrashFactory.createTrash(fs, properties) instanceof Trash);
    Assert.assertTrue(TrashFactory.createProxiedTrash(fs, properties) instanceof ProxiedTrash);

    // SIMULATE switch -> MockTrash.
    properties = getBaseProperties(trashDirectory);
    properties.setProperty(TrashFactory.SIMULATE, Boolean.toString(true));
    Assert.assertTrue(TrashFactory.createTrash(fs, properties) instanceof MockTrash);
    Assert.assertTrue(TrashFactory.createProxiedTrash(fs, properties) instanceof MockTrash);

    // TRASH_TEST switch -> TestTrash.
    properties = getBaseProperties(trashDirectory);
    properties.setProperty(TrashFactory.TRASH_TEST, Boolean.toString(true));
    Assert.assertTrue(TrashFactory.createTrash(fs, properties) instanceof TestTrash);
    Assert.assertTrue(TrashFactory.createProxiedTrash(fs, properties) instanceof TestTrash);

    // SKIP_TRASH switch -> ImmediateDeletionTrash.
    properties = getBaseProperties(trashDirectory);
    properties.setProperty(TrashFactory.SKIP_TRASH, Boolean.toString(true));
    Assert.assertTrue(TrashFactory.createTrash(fs, properties) instanceof ImmediateDeletionTrash);
    Assert.assertTrue(TrashFactory.createProxiedTrash(fs, properties) instanceof ImmediateDeletionTrash);
  }

  /**
   * Builds a mock {@link FileSystem} exposing an existing, empty trash directory that
   * already contains the trash identifier file.
   */
  private FileSystem mockFileSystem(Path homeDirectory, Path trashDirectory) throws IOException {
    FileSystem fs = mock(FileSystem.class);
    Path trashIdentifierFile = new Path(trashDirectory, Trash.TRASH_IDENTIFIER_FILE);

    when(fs.getHomeDirectory()).thenReturn(homeDirectory);
    when(fs.exists(trashDirectory)).thenReturn(true);
    when(fs.exists(trashIdentifierFile)).thenReturn(true);
    when(fs.listStatus(trashDirectory)).thenReturn(new FileStatus[]{});
    when(fs.isDirectory(trashDirectory)).thenReturn(true);
    when(fs.mkdirs(any(Path.class))).thenReturn(true);
    when(fs.mkdirs(any(Path.class), any(FsPermission.class))).thenReturn(true);
    when(fs.createNewFile(any(Path.class))).thenReturn(true);
    when(fs.makeQualified(any(Path.class))).thenAnswer(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation) throws Throwable {
        // Identity answer: return the input path unqualified.
        return (Path) invocation.getArguments()[0];
      }
    });
    return fs;
  }

  /** Returns properties pointing the trash location at {@code trashLocation}. */
  private Properties getBaseProperties(Path trashLocation) {
    Properties properties = new Properties();
    properties.setProperty(Trash.TRASH_LOCATION_KEY, trashLocation.toString());
    return properties;
  }
}
| 2,311 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/MockTrashTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class MockTrashTest {

  /**
   * Verifies that {@link MockTrash} reports success for moveToTrash without performing any
   * file-system mutation: only {@code getHomeDirectory} and {@code makeQualified} are touched.
   *
   * <p>Renamed from {@code MockTrashTest()} — a method sharing the class name reads like a
   * constructor and violates Java naming conventions; TestNG discovers tests by annotation,
   * so the rename is safe.
   */
  @Test
  public void testMockTrash() throws IOException {
    FileSystem fs = mock(FileSystem.class);
    Path homeDirectory = new Path("/home/directory");
    when(fs.getHomeDirectory()).thenReturn(homeDirectory);
    when(fs.makeQualified(any(Path.class))).thenAnswer(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation) throws Throwable {
        // Identity answer: return the input path unqualified.
        return (Path) invocation.getArguments()[0];
      }
    });

    Trash trash = new MockTrash(fs, new Properties(), "user");
    Assert.assertTrue(trash.moveToTrash(new Path("/some/path")));

    // MockTrash must not mutate the file system: assert no calls beyond the two read-only ones.
    verify(fs).getHomeDirectory();
    verify(fs).makeQualified(any(Path.class));
    verifyNoMoreInteractions(fs);
  }
}
| 2,312 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/trash/TrashTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.trash;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.joda.time.DateTimeZone;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.internal.collections.Pair;
import com.google.common.collect.Lists;
import static org.mockito.Mockito.*;
@Test(groups = { "SystemTimeTests"})
public class TrashTest {

  /** Sanity check: a {@link Trash} can be constructed with default properties. */
  @Test
  public void testCreateTrash() throws IOException {
    new TrashTestBase(new Properties());
  }

  /**
   * Verifies trash-directory bootstrap: the identifier file is created when missing from an
   * empty trash, construction fails when the trash is non-empty but unidentified, and the
   * directory itself is created with user-only permissions when absent.
   */
  @Test
  public void testCreationCases() throws IOException {
    TrashTestBase trash;
    // If trash ident file doesn't exist, but trash is empty, create
    trash = new TrashTestBase(new Properties(), true, false, true);
    verify(trash.fs).createNewFile(new Path(trash.trash.getTrashLocation(), Trash.TRASH_IDENTIFIER_FILE));
    // If trash ident file doesn't exist, but trash is not empty, fail
    try {
      trash = new TrashTestBase(new Properties(), true, false, false);
      Assert.fail();
    } catch (IOException ioe) {
      // Expected: a non-empty directory without the identifier file must be rejected.
    }
    trash = new TrashTestBase(new Properties(), false, false, true);
    verify(trash.fs).mkdirs(trash.trash.getTrashLocation(), new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
    verify(trash.fs).createNewFile(new Path(trash.trash.getTrashLocation(), Trash.TRASH_IDENTIFIER_FILE));
  }

  /** Verifies that "$USER" in the configured trash location is replaced by the current user name. */
  @Test
  public void testUserReplacement() throws IOException {
    Properties properties = new Properties();
    properties.setProperty(Trash.TRASH_LOCATION_KEY, "/trash/$USER/dir");
    Path expectedTrashPath = new Path("/trash/" + UserGroupInformation.getCurrentUser().getUserName() + "/dir");
    TrashTestBase trash = new TrashTestBase(properties);
    Assert.assertEquals(trash.trash.getTrashLocation(), expectedTrashPath);
  }

  /** Verifies that moveToTrash renames the path into a timestamped subdirectory of the trash. */
  @Test
  public void testMoveToTrash() throws IOException {
    TrashTestBase trash = new TrashTestBase(new Properties());

    Path pathToDelete = new Path("/path/to/delete");

    final List<Pair<Path, Path>> movedPaths = Lists.newArrayList();

    when(trash.fs.exists(any(Path.class))).thenReturn(false);
    when(trash.fs.rename(any(Path.class), any(Path.class))).thenAnswer(new Answer<Boolean>() {
      @Override
      public Boolean answer(InvocationOnMock invocation)
          throws Throwable {
        // Capture every (source, destination) rename pair for later inspection.
        Object[] args = invocation.getArguments();
        movedPaths.add(new Pair<Path, Path>((Path) args[0], (Path) args[1]));
        return true;
      }
    });

    Assert.assertTrue(trash.trash.moveToTrash(pathToDelete));

    verify(trash.fs, times(1)).mkdirs(any(Path.class));
    Assert.assertEquals(movedPaths.size(), 1);
    Assert.assertEquals(movedPaths.get(0).first(), pathToDelete);
    // The destination preserves the original path structure under the trash location.
    Assert.assertTrue(movedPaths.get(0).second().toString().endsWith(pathToDelete.toString()));
    Assert.assertEquals(movedPaths.get(0).second().getParent().getParent().getParent(), trash.trash.getTrashLocation());
  }

  /**
   * Verifies that moving a path whose destination already exists in the trash appends a
   * numeric suffix to the file name instead of overwriting the existing entry.
   */
  @Test
  public void testMoveToTrashExistingFile() throws IOException {
    TrashTestBase trash = new TrashTestBase(new Properties());

    String fileName = "delete";
    Path pathToDelete = new Path("/path/to", fileName);
    // Conflicting names are expected to get a "_<digits>" suffix.
    Pattern expectedNamePattern = Pattern.compile("^" + fileName + "_[0-9]+$");

    final List<Pair<Path, Path>> movedPaths = Lists.newArrayList();

    when(trash.fs.exists(any(Path.class))).thenReturn(true);
    when(trash.fs.rename(any(Path.class), any(Path.class))).thenAnswer(new Answer<Boolean>() {
      @Override
      public Boolean answer(InvocationOnMock invocation)
          throws Throwable {
        // Capture every (source, destination) rename pair for later inspection.
        Object[] args = invocation.getArguments();
        movedPaths.add(new Pair<Path, Path>((Path) args[0], (Path) args[1]));
        return true;
      }
    });

    Assert.assertTrue(trash.trash.moveToTrash(pathToDelete));

    // Destination directory already exists, so no mkdirs call is expected.
    verify(trash.fs, times(0)).mkdirs(any(Path.class));
    Assert.assertEquals(movedPaths.size(), 1);
    Assert.assertEquals(movedPaths.get(0).first(), pathToDelete);
    Assert.assertTrue(movedPaths.get(0).second().getParent().toString().endsWith(pathToDelete.getParent().toString()));
    Assert.assertTrue(expectedNamePattern.matcher(movedPaths.get(0).second().getName()).matches());
    Assert.assertEquals(movedPaths.get(0).second().getParent().getParent().getParent(), trash.trash.getTrashLocation());
  }

  /**
   * Verifies that createTrashSnapshot moves existing trash contents into a newly created
   * UTC-timestamped snapshot directory. Uses a frozen clock via {@link DateTimeUtils}.
   */
  @Test
  public void testCreateSnapshot() throws IOException {
    try {
      TrashTestBase trash = new TrashTestBase(new Properties());

      Path pathInTrash = new Path(trash.trash.getTrashLocation(), "dirInTrash");

      // Freeze time so the snapshot directory name is deterministic.
      DateTimeUtils.setCurrentMillisFixed(new DateTime(2015, 7, 15, 10, 0).getMillis());

      final List<Path> createdDirs = Lists.newArrayList();
      final List<Pair<Path, Path>> movedPaths = Lists.newArrayList();

      when(trash.fs.listStatus(eq(trash.trash.getTrashLocation()), any(PathFilter.class))).
          thenReturn(Lists.newArrayList(new FileStatus(0, true, 0, 0, 0, pathInTrash)).toArray(new FileStatus[]{}));
      when(trash.fs.exists(any(Path.class))).thenReturn(false);
      when(trash.fs.mkdirs(any(Path.class), any(FsPermission.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation)
            throws Throwable {
          // Record every directory the trash creates.
          createdDirs.add((Path) invocation.getArguments()[0]);
          return true;
        }
      });
      when(trash.fs.rename(any(Path.class), any(Path.class))).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation)
            throws Throwable {
          // Capture every (source, destination) rename pair for later inspection.
          Object[] args = invocation.getArguments();
          movedPaths.add(new Pair<Path, Path>((Path) args[0], (Path) args[1]));
          return true;
        }
      });

      trash.trash.createTrashSnapshot();

      Assert.assertEquals(createdDirs.size(), 1);
      Path createdDir = createdDirs.get(0);
      // The snapshot directory is named after the (frozen) current time in UTC.
      Assert.assertEquals(Trash.TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(createdDir.getName()),
          new DateTime().withZone(DateTimeZone.UTC));

      Assert.assertEquals(movedPaths.size(), 1);
      Assert.assertEquals(movedPaths.get(0).first(), pathInTrash);
      Assert.assertEquals(movedPaths.get(0).second().getName(), pathInTrash.getName());
      Assert.assertEquals(movedPaths.get(0).second().getParent(), createdDir);
    } finally {
      // Always restore the system clock for the other tests in this group.
      DateTimeUtils.setCurrentMillisSystem();
    }
  }

  /**
   * Verifies that purgeTrashSnapshots uses the configured {@link SnapshotCleanupPolicy}:
   * with {@link TestCleanupPolicy} (deletes only the first snapshot it is asked about),
   * exactly one snapshot — the older one — is deleted.
   */
  @Test
  public void testPurgeSnapshots() throws IOException {
    try {
      Properties properties = new Properties();
      properties.setProperty(Trash.SNAPSHOT_CLEANUP_POLICY_CLASS_KEY, TestCleanupPolicy.class.getCanonicalName());
      TrashTestBase trash = new TrashTestBase(properties);

      // Freeze time so the snapshot names are deterministic.
      DateTimeUtils.setCurrentMillisFixed(new DateTime(2015, 7, 15, 10, 0).withZone(DateTimeZone.UTC).getMillis());

      final List<Path> deletedPaths = Lists.newArrayList();

      Path snapshot1 = new Path(trash.trash.getTrashLocation(), Trash.TRASH_SNAPSHOT_NAME_FORMATTER.print(new DateTime()));
      Path snapshot2 = new Path(trash.trash.getTrashLocation(),
          Trash.TRASH_SNAPSHOT_NAME_FORMATTER.print(new DateTime().minusDays(1)));

      when(trash.fs.listStatus(any(Path.class), any(PathFilter.class))).
          thenReturn(
              Lists.newArrayList(
                  new FileStatus(0, true, 0, 0, 0, snapshot1),
                  new FileStatus(0, true, 0, 0, 0, snapshot2))
              .toArray(new FileStatus[]{}));
      when(trash.fs.delete(any(Path.class), anyBoolean())).thenAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocation)
            throws Throwable {
          // Record every path the trash deletes.
          deletedPaths.add((Path) invocation.getArguments()[0]);
          return true;
        }
      });

      trash.trash.purgeTrashSnapshots();

      // TestCleanupPolicy approves exactly one deletion, and the older snapshot is the one removed.
      Assert.assertEquals(deletedPaths.size(), 1);
      Assert.assertEquals(deletedPaths.get(0), snapshot2);
    } finally {
      DateTimeUtils.setCurrentMillisSystem();
    }
  }
}
| 2,313 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/version/finder/DatePartitionedHiveVersionFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.is;
import java.net.URLDecoder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.conversion.hive.LocalHiveMetastoreTestUtils;
import org.apache.gobblin.data.management.version.TimestampedHiveDatasetVersion;
@Test(groups = { "gobblin.data.management.version" })
public class DatePartitionedHiveVersionFinderTest {

  private FileSystem fs;
  private LocalHiveMetastoreTestUtils hiveMetastoreTestUtils;
  // Expected timestamps in this test are expressed in this PST-based yyyy/MM/dd/HH format.
  private DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH").withZone(DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME));
  private String dbName = "VfDb1";

  @BeforeClass
  public void setup() throws Exception {
    this.fs = FileSystem.getLocal(new Configuration());
    this.hiveMetastoreTestUtils = LocalHiveMetastoreTestUtils.getInstance();
    // Start from a clean slate in case a previous run left the database behind.
    this.hiveMetastoreTestUtils.dropDatabaseIfExists(this.dbName);
    this.hiveMetastoreTestUtils.createTestDb(this.dbName);
  }

  @AfterClass
  public void cleanUp() {
    try {
      this.hiveMetastoreTestUtils.dropDatabaseIfExists(this.dbName);
    } catch (Exception e) {
      // Will get cleaned up next run of test
    }
  }

  /** With an empty config, a "datepartition" key in the yyyy-MM-dd-HH format is parsed. */
  @Test
  public void testDefaults() throws Exception {
    DatePartitionHiveVersionFinder finder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());

    String tableName = "VfTb1";
    Table table = this.hiveMetastoreTestUtils.createTestAvroTable(this.dbName, tableName, ImmutableList.of("datepartition"));
    org.apache.hadoop.hive.metastore.api.Partition metastorePartition =
        this.hiveMetastoreTestUtils.addTestPartition(table, ImmutableList.of("2016-01-01-20"), (int) System.currentTimeMillis());
    Partition qlPartition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(table), metastorePartition);
    assertThat(qlPartition.getName(), is("datepartition=2016-01-01-20"));

    TimestampedHiveDatasetVersion version = finder.getDatasetVersion(qlPartition);
    Assert.assertEquals(version.getDateTime(), this.formatter.parseDateTime("2016/01/01/20"));
  }

  /** A user-configured partition key name and date-time pattern are honored. */
  @Test
  public void testUserDefinedDatePattern() throws Exception {
    String tableName = "VfTb2";
    Config config =
        ConfigFactory.parseMap(ImmutableMap.<String, String> of(DatePartitionHiveVersionFinder.PARTITION_KEY_NAME_KEY, "field1",
            DatePartitionHiveVersionFinder.PARTITION_VALUE_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/HH"));
    DatePartitionHiveVersionFinder finder = new DatePartitionHiveVersionFinder(this.fs, config);

    Table table = this.hiveMetastoreTestUtils.createTestAvroTable(this.dbName, tableName, ImmutableList.of("field1"));
    org.apache.hadoop.hive.metastore.api.Partition metastorePartition =
        this.hiveMetastoreTestUtils.addTestPartition(table, ImmutableList.of("2016/01/01/20"), (int) System.currentTimeMillis());
    Partition qlPartition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(table), metastorePartition);
    // Hive URL-encodes the slashes in the partition value; decode before comparing.
    Assert.assertEquals(URLDecoder.decode(qlPartition.getName(), "UTF-8"), "field1=2016/01/01/20");

    TimestampedHiveDatasetVersion version = finder.getDatasetVersion(qlPartition);
    Assert.assertEquals(version.getDateTime(), this.formatter.parseDateTime("2016/01/01/20"));
  }

  /** The date partition key is located even when the table has additional partition fields. */
  @Test
  public void testMultiplePartitionFields() throws Exception {
    DatePartitionHiveVersionFinder finder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());

    String tableName = "VfTb3";
    Table table = this.hiveMetastoreTestUtils.createTestAvroTable(this.dbName, tableName, ImmutableList.of("datepartition", "field1"));
    org.apache.hadoop.hive.metastore.api.Partition metastorePartition =
        this.hiveMetastoreTestUtils.addTestPartition(table, ImmutableList.of("2016-01-01-20", "f1"), (int) System.currentTimeMillis());
    Partition qlPartition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(table), metastorePartition);
    // The ordering of partition fields in the name is not guaranteed; accept either.
    assertThat(qlPartition.getName(), anyOf(is("field1=f1/datepartition=2016-01-01-20"), is("datepartition=2016-01-01-20/field1=f1")));

    TimestampedHiveDatasetVersion version = finder.getDatasetVersion(qlPartition);
    Assert.assertEquals(version.getDateTime(), this.formatter.parseDateTime("2016/01/01/20"));
  }
}
| 2,314 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/version | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/version/finder/HdfsModifiedTimeHiveVersionFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.io.IOException;
import org.apache.gobblin.data.management.version.TimestampedHiveDatasetVersion;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.typesafe.config.ConfigFactory;
@Test(groups = {"gobblin.data.management.version"})
public class HdfsModifiedTimeHiveVersionFinderTest {

  public static final String PARTITION_NAME = "RandomDb@RandomTable@RandomPartition";
  public static final String TIMESTAMP = "123456";

  private FileSystem fs = Mockito.mock(FileSystem.class);
  private Partition partition = Mockito.mock(Partition.class);
  private FileStatus fileStatus = Mockito.mock(FileStatus.class);
  private HdfsModifiedTimeHiveVersionFinder hdfsModifiedTimeHiveVersionFinder =
      new HdfsModifiedTimeHiveVersionFinder(this.fs, ConfigFactory.empty());

  @BeforeMethod
  public void initialize() {
    Mockito.doReturn(PARTITION_NAME).when(this.partition).getCompleteName();
  }

  /** A null partition must be rejected. */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testNullPartition() {
    Partition nullPartition = null;
    this.hdfsModifiedTimeHiveVersionFinder.getDatasetVersion(nullPartition);
  }

  /** A partition whose data location is null must be rejected. */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testNullDataLocation() {
    Mockito.doReturn(null).when(this.partition).getDataLocation();
    this.hdfsModifiedTimeHiveVersionFinder.getDatasetVersion(this.partition);
  }

  /** A data location that does not exist on the file system must be rejected. */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testInvalidDataLocation()
      throws IOException {
    Mockito.doReturn(new Path("Invalid Location")).when(this.partition).getDataLocation();
    Mockito.doReturn(false).when(this.fs).exists(Mockito.any(Path.class));
    this.hdfsModifiedTimeHiveVersionFinder.getDatasetVersion(this.partition);
  }

  /**
   * The version's timestamp must equal the HDFS modification time of the partition's data
   * location, and the version must carry the partition it was derived from.
   */
  @Test
  public void testTimeStampForVersion()
      throws IOException {
    Mockito.doReturn(new Path("Invalid Location")).when(this.partition).getDataLocation();
    Mockito.doReturn(true).when(this.fs).exists(Mockito.any(Path.class));
    Mockito.doReturn(this.fileStatus).when(this.fs).getFileStatus(Mockito.any(Path.class));
    Mockito.doReturn(Long.valueOf(TIMESTAMP)).when(this.fileStatus).getModificationTime();

    TimestampedHiveDatasetVersion datasetVersion =
        this.hdfsModifiedTimeHiveVersionFinder.getDatasetVersion(this.partition);

    // Check if the datasetVersion contains the correct partition
    Assert.assertTrue(datasetVersion.getPartition().getCompleteName().equalsIgnoreCase(PARTITION_NAME));
    // Check if the datasetVersion contains the correct modified timestamp of the underlying data location
    Assert.assertEquals(datasetVersion.getDateTime().getMillis(), Long.parseLong(TIMESTAMP));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.version.finder;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTimeZone;
import org.joda.time.Instant;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Unit tests for {@code LookbackDateTimeDatasetVersionFinder}, which enumerates one
 * dataset version per datetime bucket inside a configured lookback window (inclusive of
 * both ends) instead of listing paths on the filesystem.
 */
@Test(groups = { "gobblin.data.management.version" })
public class LookbackDateTimeDatasetVersionFinderTest {
  // Renders version timestamps for assertions; PST matches the zone used in the expected values.
  private final DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH").withZone(DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME));
  // Fixed "now" so the expected version ranges are deterministic across runs.
  private final Instant fixedTime = Instant.parse("2023-01-01T12:30:00.000-08:00");

  /** A 96-hour lookback yields 97 hourly versions (window endpoints inclusive). */
  @Test
  public void testHourlyVersions() throws Exception {
    Properties properties = new Properties();
    properties.put(DateTimeDatasetVersionFinder.DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/HH");
    properties.put(LookbackDateTimeDatasetVersionFinder.VERSION_PATH_PREFIX, "hourly");
    properties.put(LookbackDateTimeDatasetVersionFinder.VERSION_LOOKBACK_PERIOD, "96h");
    LookbackDateTimeDatasetVersionFinder versionFinder = new LookbackDateTimeDatasetVersionFinder(FileSystem.getLocal(new Configuration()),
        ConfigUtils.propertiesToConfig(properties), fixedTime);
    Dataset dataset = new TestDataset(new Path("/data/Dataset1"));

    Collection<TimestampedDatasetVersion> datasetVersions = versionFinder.findDatasetVersions(dataset);
    List<TimestampedDatasetVersion> sortedVersions = datasetVersions.stream().sorted().collect(Collectors.toList());

    Assert.assertEquals(datasetVersions.size(), 97);
    // Oldest version is exactly 96 hours before the fixed time; newest is the current hour.
    Assert.assertEquals(sortedVersions.get(0).getVersion().toString(formatter), "2022/12/28/12");
    Assert.assertEquals(sortedVersions.get(0).getPath().toString(), "/data/Dataset1/hourly/2022/12/28/12");
    Assert.assertEquals(sortedVersions.get(sortedVersions.size() - 1).getVersion().toString(formatter), "2023/01/01/12");
    Assert.assertEquals(sortedVersions.get(sortedVersions.size() - 1).getPath().toString(), "/data/Dataset1/hourly/2023/01/01/12");
  }

  /** A 366-day lookback yields 367 daily versions (window endpoints inclusive). */
  @Test
  public void testDailyVersions() throws Exception {
    Properties properties = new Properties();
    properties.put(DateTimeDatasetVersionFinder.DATE_TIME_PATTERN_KEY, "yyyy/MM/dd");
    properties.put(LookbackDateTimeDatasetVersionFinder.VERSION_PATH_PREFIX, "daily");
    properties.put(LookbackDateTimeDatasetVersionFinder.VERSION_LOOKBACK_PERIOD, "366d");
    LookbackDateTimeDatasetVersionFinder versionFinder = new LookbackDateTimeDatasetVersionFinder(FileSystem.getLocal(new Configuration()),
        ConfigUtils.propertiesToConfig(properties), fixedTime);
    Dataset dataset = new TestDataset(new Path("/data/Dataset1"));

    Collection<TimestampedDatasetVersion> datasetVersions = versionFinder.findDatasetVersions(dataset);
    List<TimestampedDatasetVersion> sortedVersions = datasetVersions.stream().sorted().collect(Collectors.toList());

    Assert.assertEquals(datasetVersions.size(), 367);
    // Daily versions are truncated to midnight, hence the trailing "/00" in the rendered hour.
    Assert.assertEquals(sortedVersions.get(0).getVersion().toString(formatter), "2021/12/31/00");
    Assert.assertEquals(sortedVersions.get(0).getPath().toString(), "/data/Dataset1/daily/2021/12/31");
    Assert.assertEquals(sortedVersions.get(sortedVersions.size() - 1).getVersion().toString(formatter), "2023/01/01/00");
    Assert.assertEquals(sortedVersions.get(sortedVersions.size() - 1).getPath().toString(), "/data/Dataset1/daily/2023/01/01");
  }
}
class TestDataset implements FileSystemDataset {
private final Path datasetRoot;
public TestDataset(Path datasetRoot) {
this.datasetRoot = datasetRoot;
}
public Path datasetRoot() {
return datasetRoot;
}
public String datasetURN() {
return null;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.publisher.HiveConvertPublisher;
/**
 * Tests {@link HiveConvertPublisher}'s copying of partition parameters from a source
 * partition to the corresponding destination partition after a successful conversion.
 *
 * @author adsharma
 */
@Test
public class CopyPartitionParametersTest {
  private static final String SRC_PARTITION = "srcDb@srcTable@srcPartition";
  private static final String DEST_PARTITION = "destDb@destTable@destPartition";
  private static final String ALL = "*";
  private static final String MY_VALUE = "myValue";
  private static final String MY_PROP = "myProp";

  private HiveConvertPublisher publisherMock = Mockito.mock(HiveConvertPublisher.class);
  private WorkUnitState workUnitState = new WorkUnitState();
  private Partition sourcePartition = Mockito.mock(Partition.class);
  private Partition destPartition = Mockito.mock(Partition.class);
  private Map<String, String> sourceParams = new HashMap<>();
  private Map<String, String> destParams = new HashMap<>();

  @BeforeTest
  private void init() {
    // Stub partition lookup / drop / add so that only copyPartitionParams and
    // preservePartitionParams execute their real implementations.
    Mockito.doReturn(Optional.fromNullable(this.sourcePartition)).when(this.publisherMock)
        .getPartitionObject(SRC_PARTITION);
    Mockito.doReturn(Optional.fromNullable(this.destPartition)).when(this.publisherMock)
        .getPartitionObject(DEST_PARTITION);
    Mockito.doReturn(true).when(this.publisherMock).dropPartition(DEST_PARTITION);
    Mockito.doReturn(true).when(this.publisherMock).addPartition(this.destPartition, DEST_PARTITION);
    Mockito.doCallRealMethod().when(this.publisherMock)
        .copyPartitionParams(SRC_PARTITION, DEST_PARTITION, Collections.singletonList(ALL), Collections.<String>emptyList());
    Mockito.doCallRealMethod().when(this.publisherMock)
        .preservePartitionParams(Collections.singleton(this.workUnitState));
    // Back the mocked partitions with real maps so copied parameters can be inspected.
    Mockito.doReturn(this.sourceParams).when(this.sourcePartition).getParameters();
    Mockito.doReturn(this.destParams).when(this.destPartition).getParameters();
  }

  /** With the "*" whitelist, a source parameter must be copied to the destination partition. */
  public void test() {
    this.workUnitState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
    this.workUnitState.setProp(HiveConvertPublisher.COMPLETE_SOURCE_PARTITION_NAME, SRC_PARTITION);
    this.workUnitState.setProp(HiveConvertPublisher.COMPLETE_DEST_PARTITION_NAME, DEST_PARTITION);
    this.workUnitState.setProp(HiveConvertPublisher.PARTITION_PARAMETERS_WHITELIST, ALL);
    this.sourceParams.put(MY_PROP, MY_VALUE);
    this.publisherMock.preservePartitionParams(Collections.singleton(this.workUnitState));
    Assert.assertTrue(this.destParams.get(MY_PROP).equalsIgnoreCase(MY_VALUE));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.conversion.hive.source.BackfillHiveSource;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
/**
 * Tests for {@link BackfillHiveSource}'s partition whitelisting behavior.
 */
@Test(groups = {"gobblin.data.management.conversion"})
public class BackfillHiveSourceTest {

  /** With no whitelist configured, every partition is eligible for a workunit. */
  @Test
  public void testNoWhitelist() throws Exception {
    BackfillHiveSource source = new BackfillHiveSource();
    source.initBackfillHiveSource(new SourceState());

    Partition anyPartition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);

    Assert.assertTrue(source.shouldCreateWorkunit(anyPartition, new LongWatermark(0)));
  }

  /** With a whitelist configured, only partitions named in it get workunits. */
  @Test
  public void testWhitelist() throws Exception {
    BackfillHiveSource source = new BackfillHiveSource();
    SourceState state = new SourceState();
    state.setProp(BackfillHiveSource.BACKFILL_SOURCE_PARTITION_WHITELIST_KEY,
        "service@logEvent@datepartition=2016-08-04-00,service@logEvent@datepartition=2016-08-05-00");
    source.initBackfillHiveSource(state);

    Partition whitelisted1 = partitionNamed("service@logEvent@datepartition=2016-08-04-00");
    Partition whitelisted2 = partitionNamed("service@logEvent@datepartition=2016-08-05-00");
    Partition notWhitelisted = partitionNamed("service@logEvent@datepartition=2016-08-06-00");

    Assert.assertTrue(source.shouldCreateWorkunit(whitelisted1, new LongWatermark(0)));
    Assert.assertTrue(source.shouldCreateWorkunit(whitelisted2, new LongWatermark(0)));
    Assert.assertFalse(source.shouldCreateWorkunit(notWhitelisted, new LongWatermark(0)));
  }

  /** Builds a mocked {@link Partition} reporting the given complete name. */
  private static Partition partitionNamed(String completeName) {
    Partition partition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
    Mockito.when(partition.getCompleteName()).thenReturn(completeName);
    return partition;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.hive.avro.HiveAvroSerDeManager;
/**
 * Provides a singleton instance of a local, embedded (Derby-backed) Hive metastore client
 * and helper methods for creating test databases, Avro tables, and partitions.
 *
 * <p>Intended strictly for unit tests: metastore state lives under temporary directories
 * and a fresh embedded metastore is created once per JVM.</p>
 */
@Slf4j
public class LocalHiveMetastoreTestUtils {
  private static LocalHiveMetastoreTestUtils instance;
  private IMetaStoreClient localMetastoreClient;

  private LocalHiveMetastoreTestUtils() throws IOException {
    try {
      // Remove any Derby database left behind by a previous run so each JVM starts clean.
      FileUtils.deleteDirectory(new File("metastore_db"));
    } catch (IOException e) {
      // Best effort: a stale directory only risks cross-run interference, so log and continue.
      log.warn("Failed to delete pre-existing metastore_db directory", e);
    }
    File tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
    // Point Derby's system home at the temp dir so the embedded metastore writes there.
    Properties p = System.getProperties();
    p.setProperty("derby.system.home", tmpDir.getAbsolutePath());
    this.localMetastoreClient =
        HiveMetastoreClientPool.get(new Properties(), Optional.<String>absent()).getClient().get();
  }

  static {
    try {
      // Not most optimal singleton initialization, but sufficient for test
      instance = new LocalHiveMetastoreTestUtils();
    } catch (Throwable t) {
      throw new RuntimeException("Exception occurred in initializing ConversionHiveUtils", t);
    }
  }

  /** @return the per-JVM singleton instance. */
  public static LocalHiveMetastoreTestUtils getInstance() {
    return instance;
  }

  public IMetaStoreClient getLocalMetastoreClient() {
    return localMetastoreClient;
  }

  /** Drops {@code dbName} (with cascade) if it exists; a missing database is not an error. */
  public void dropDatabaseIfExists(String dbName) throws MetaException, TException {
    try {
      this.getLocalMetastoreClient().getDatabase(dbName);
      this.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    } catch (NoSuchObjectException e) {
      // No need to drop
    }
  }

  /** Creates a test Avro table (and its database) with an optional single string partition key. */
  public Table createTestAvroTable(String dbName, String tableName, String tableSdLoc, Optional<String> partitionFieldName)
      throws Exception {
    return createTestAvroTable(dbName, tableName, tableSdLoc, partitionFieldName, false);
  }

  /**
   * Creates a test Avro table with an optional single string partition key.
   *
   * @param ignoreDbCreation when true, assumes the database already exists
   */
  public Table createTestAvroTable(String dbName, String tableName, String tableSdLoc,
      Optional<String> partitionFieldName, boolean ignoreDbCreation) throws Exception {
    if (!ignoreDbCreation) {
      createTestDb(dbName);
    }
    Table tbl = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(dbName, tableName);
    tbl.getSd().setLocation(tableSdLoc);
    tbl.getSd().getSerdeInfo().setSerializationLib(AvroSerDe.class.getName());
    tbl.getSd().getSerdeInfo().setParameters(ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy"));
    if (partitionFieldName.isPresent()) {
      tbl.addToPartitionKeys(new FieldSchema(partitionFieldName.get(), "string", "some comment"));
    }
    this.localMetastoreClient.createTable(tbl);
    return tbl;
  }

  /**
   * Creates a test Avro table at {@code /tmp/<tableName>} with the given string partition keys.
   * The database is assumed to already exist (database creation is skipped).
   */
  public Table createTestAvroTable(String dbName, String tableName, List<String> partitionFieldNames) throws Exception {
    return createTestAvroTable(dbName, tableName, "/tmp/" + tableName, partitionFieldNames, true);
  }

  /**
   * Creates a test Avro table with the given string partition keys. Unlike the
   * {@link Optional}-keyed overload, no serialization library is set on the serde.
   *
   * @param ignoreDbCreation when true, assumes the database already exists
   */
  public Table createTestAvroTable(String dbName, String tableName, String tableSdLoc,
      List<String> partitionFieldNames, boolean ignoreDbCreation)
      throws Exception {
    if (!ignoreDbCreation) {
      createTestDb(dbName);
    }
    Table tbl = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(dbName, tableName);
    tbl.getSd().setLocation(tableSdLoc);
    tbl.getSd().getSerdeInfo().setParameters(ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy"));
    for (String partitionFieldName : partitionFieldNames) {
      tbl.addToPartitionKeys(new FieldSchema(partitionFieldName, "string", "some comment"));
    }
    this.localMetastoreClient.createTable(tbl);
    return tbl;
  }

  /** Creates database {@code dbName} under {@code /tmp/<dbName>}; an existing database is logged, not fatal. */
  public void createTestDb(String dbName) throws Exception {
    Database db = new Database(dbName, "Some description", "/tmp/" + dbName, new HashMap<String, String>());
    try {
      this.localMetastoreClient.createDatabase(db);
    } catch (AlreadyExistsException e) {
      log.warn(dbName + " already exists");
    }
  }

  /** Registers a partition on {@code tbl} with the given key values and create time (seconds). */
  public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
    StorageDescriptor partitionSd = new StorageDescriptor();
    if (StringUtils.isNotBlank(tbl.getSd().getLocation())) {
      // NOTE(review): this appends the List's toString() (e.g. "[f1]") to the table
      // location; looks accidental, but existing tests rely on the current behavior.
      partitionSd.setLocation(tbl.getSd().getLocation() + values);
    } else {
      partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
    }
    partitionSd.setSerdeInfo(
        new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
    partitionSd.setCols(tbl.getPartitionKeys());
    Partition partition =
        new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
    partition.setCreateTime(createTime);
    return this.getLocalMetastoreClient().add_partition(partition);
  }

  /** Builds an in-memory (unregistered) partition whose create time is {@code createTime} milliseconds, stored as seconds. */
  public org.apache.hadoop.hive.ql.metadata.Partition createDummyPartition(long createTime) {
    org.apache.hadoop.hive.ql.metadata.Partition partition = new org.apache.hadoop.hive.ql.metadata.Partition();
    Partition tPartition = new Partition();
    tPartition.setCreateTime((int) TimeUnit.SECONDS.convert(createTime, TimeUnit.MILLISECONDS));
    partition.setTPartition(tPartition);
    return partition;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive;
import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.joda.time.DateTime;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.ConversionHiveTestUtils;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker;
import org.apache.gobblin.data.management.conversion.hive.watermarker.TableLevelWatermarker;
import org.apache.gobblin.source.workunit.WorkUnit;
// Tests HiveSource workunit generation against the embedded local Hive metastore:
// table-level workunits, partition-level workunits (plus the watermark workunit),
// watermark-based filtering, and the lookback-window checks for old partitions.
@Test(groups = { "gobblin.data.management.conversion" })
public class HiveSourceTest {
  private static final String TEST_TABLE_1 = "testtable1";
  private static final String TEST_TABLE_2 = "testtable2";
  private static final String TEST_TABLE_3 = "testtable3";
  private static final String TEST_TABLE_4 = "testtable4";
  private static final String TEST_TABLE_5 = "testtable5";
  // Singleton helper wrapping an embedded metastore; each test isolates itself by
  // using a distinct database name.
  private LocalHiveMetastoreTestUtils hiveMetastoreTestUtils;
  private HiveSource hiveSource;
  private File tmpDir;
  @BeforeClass
  public void setup() {
    this.hiveMetastoreTestUtils = LocalHiveMetastoreTestUtils.getInstance();
    this.hiveSource = new HiveSource();
    this.tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
  }
  // An unpartitioned table should yield exactly one table-level workunit.
  @Test
  public void testGetWorkUnitsForTable() throws Exception {
    String dbName = "testdb2";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE_2).getAbsolutePath();
    // Drop first so reruns within the same JVM start from clean metastore state.
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    SourceState testState = getTestState(dbName);
    this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_2, tableSdLoc, Optional.<String> absent());
    List<WorkUnit> workUnits = hiveSource.getWorkunits(testState);
    // One workunit for the table, no dummy workunits
    Assert.assertEquals(workUnits.size(), 1);
    WorkUnit wu = workUnits.get(0);
    HiveWorkUnit hwu = new HiveWorkUnit(wu);
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getDb(), dbName);
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getTable(), TEST_TABLE_2);
    Assert.assertEquals(hwu.getTableSchemaUrl(), new Path("/tmp/dummy"));
  }
  // Adding the same partition twice must raise AlreadyExistsException.
  @Test
  public void testAlreadyExistsPartition() throws Exception {
    String dbName = "testdb";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE_1).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_1, tableSdLoc, Optional.of("field"));
    // NOTE(review): casting currentTimeMillis() to int overflows; the resulting createTime
    // value is garbage, but these assertions do not depend on it — confirm before reuse.
    this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("f1"), (int) System.currentTimeMillis());
    try {
      this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("f1"), (int) System.currentTimeMillis());
    } catch (AlreadyExistsException e) {
      return;
    }
    Assert.fail();
  }
  // Fetching a partition that was never added must raise NoSuchObjectException.
  @Test
  public void testPartitionNotExists() throws Exception {
    String dbName = "testdb1";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE_1).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_1, tableSdLoc, Optional.of("field"));
    try {
      this.hiveMetastoreTestUtils.getLocalMetastoreClient().getPartition(tbl.getDbName(), tbl.getTableName(), "field");
    } catch (NoSuchObjectException e) {
      return;
    }
    Assert.fail();
  }
  // A partitioned table should yield one workunit per partition plus one watermark workunit.
  @Test
  public void testGetWorkUnitsForPartitions() throws Exception {
    String dbName = "testdb3";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE_3).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    SourceState testState = getTestState(dbName);
    Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_3, tableSdLoc, Optional.of("field"));
    this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("f1"), (int) System.currentTimeMillis());
    List<WorkUnit> workUnits = this.hiveSource.getWorkunits(testState);
    // One workunit for the partition + 1 dummy watermark workunit
    Assert.assertEquals(workUnits.size(), 2);
    WorkUnit wu = workUnits.get(0);
    WorkUnit wu2 = workUnits.get(1);
    HiveWorkUnit hwu;
    // Workunit ordering is not guaranteed; pick whichever is not the watermark workunit.
    if (!wu.contains(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
      hwu = new HiveWorkUnit(wu);
    } else {
      hwu = new HiveWorkUnit(wu2);
    }
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getDb(), dbName);
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getTable(), TEST_TABLE_3);
    Assert.assertEquals(hwu.getPartitionName().get(), "field=f1");
  }
  // With a table-level watermark slightly older than the first table's create time,
  // both tables still produce workunits.
  @Test
  public void testGetWorkunitsAfterWatermark() throws Exception {
    String dbName = "testdb4";
    String tableSdLoc1 = new File(this.tmpDir, TEST_TABLE_4).getAbsolutePath();
    String tableSdLoc2 = new File(this.tmpDir, TEST_TABLE_5).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_4, tableSdLoc1, Optional.<String> absent());
    this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE_5, tableSdLoc2, Optional.<String> absent(), true);
    List<WorkUnitState> previousWorkUnitStates = Lists.newArrayList();
    Table table1 = this.hiveMetastoreTestUtils.getLocalMetastoreClient().getTable(dbName, TEST_TABLE_4);
    // Denote watermark to have a past created timestamp, so that the watermark workunit gets generated
    // This is so that the test is reliable across different operating systems and not flaky to System timing differences
    previousWorkUnitStates.add(ConversionHiveTestUtils.createWus(dbName, TEST_TABLE_4,
        TimeUnit.MILLISECONDS.convert(table1.getCreateTime(), TimeUnit.SECONDS)-100));
    SourceState testState = new SourceState(getTestState(dbName), previousWorkUnitStates);
    testState.setProp(HiveSource.HIVE_SOURCE_WATERMARKER_FACTORY_CLASS_KEY, TableLevelWatermarker.Factory.class.getName());
    List<WorkUnit> workUnits = this.hiveSource.getWorkunits(testState);
    Assert.assertEquals(workUnits.size(), 2);
    WorkUnit wu = workUnits.get(0);
    HiveWorkUnit hwu = new HiveWorkUnit(wu);
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getDb(), dbName);
    Assert.assertEquals(hwu.getHiveDataset().getDbAndTable().getTable(), TEST_TABLE_4);
    WorkUnit wu2 = workUnits.get(1);
    HiveWorkUnit hwu2 = new HiveWorkUnit(wu2);
    Assert.assertEquals(hwu2.getHiveDataset().getDbAndTable().getDb(), dbName);
    Assert.assertEquals(hwu2.getHiveDataset().getDbAndTable().getTable(), TEST_TABLE_5);
  }
  // A partition created 35 days ago falls outside the default lookback and is skipped.
  @Test
  public void testShouldCreateWorkunitsOlderThanLookback() throws Exception {
    long currentTime = System.currentTimeMillis();
    long partitionCreateTime = new DateTime(currentTime).minusDays(35).getMillis();
    org.apache.hadoop.hive.ql.metadata.Partition partition =
        this.hiveMetastoreTestUtils.createDummyPartition(partitionCreateTime);
    SourceState testState = getTestState("testDb6");
    HiveSource source = new HiveSource();
    source.initialize(testState);
    boolean isOlderThanLookback = source.isOlderThanLookback(partition);
    Assert.assertEquals(isOlderThanLookback, true, "Should not create workunits older than lookback");
  }
  // A partition created within the (default 3-day) lookback window is not skipped.
  @Test
  public void testShouldCreateWorkunitsNewerThanLookback() throws Exception {
    long currentTime = System.currentTimeMillis();
    // Default lookback time is 3 days
    long partitionCreateTime = new DateTime(currentTime).minusDays(2).getMillis();
    org.apache.hadoop.hive.ql.metadata.Partition partition =
        this.hiveMetastoreTestUtils.createDummyPartition(partitionCreateTime);
    SourceState testState = getTestState("testDb7");
    HiveSource source = new HiveSource();
    source.initialize(testState);
    boolean isOlderThanLookback = source.isOlderThanLookback(partition);
    Assert.assertEquals(isOlderThanLookback, false, "Should create workunits newer than lookback");
  }
  // When a distcp registration-generation time parameter is present, it (not the
  // partition create time, here epoch 0) drives the lookback decision.
  @Test
  public void testIsOlderThanLookbackForDistcpGenerationTime() throws Exception {
    long currentTime = System.currentTimeMillis();
    // Default lookback time is 3 days
    long partitionCreateTime = new DateTime(currentTime).minusDays(2).getMillis();
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put(HiveSource.DISTCP_REGISTRATION_GENERATION_TIME_KEY, partitionCreateTime + "");
    org.apache.hadoop.hive.ql.metadata.Partition partition = this.hiveMetastoreTestUtils.createDummyPartition(0);
    partition.getTPartition().setParameters(parameters);
    SourceState testState = getTestState("testDb6");
    HiveSource source = new HiveSource();
    source.initialize(testState);
    boolean isOlderThanLookback = source.isOlderThanLookback(partition);
    Assert.assertEquals(isOlderThanLookback, false, "Should create workunits newer than lookback");
  }
  // Builds the minimal SourceState (database, table pattern, job id) HiveSource needs.
  private static SourceState getTestState(String dbName) {
    SourceState testState = new SourceState();
    testState.setProp("hive.dataset.database", dbName);
    testState.setProp("hive.dataset.table.pattern", "*");
    testState.setProp(ConfigurationKeys.JOB_ID_KEY, "testJobId");
    return testState;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.converter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.data.management.ConversionHiveTestUtils;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.util.AvroFlattener;
/**
* Test for schema evolution enabled or disabled
*/
@Slf4j
@Test(groups = { "gobblin.data.management.conversion" })
public class HiveSchemaEvolutionTest {
  private static String resourceDir = "avroToOrcSchemaEvolutionTest";
  private static String schemaName = "sourceSchema";
  private static String hiveDbName = "hiveDb";
  private static AvroFlattener avroFlattener = new AvroFlattener();
  // Shared fixtures: the raw Avro source schema and its flattened counterpart,
  // loaded once for all tests from the classpath resource directory.
  private static Schema inputSchema;
  private static Schema outputSchema;
  private static Optional<Integer> rowLimit = Optional.absent();
  static {
    try {
      inputSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "source_schema.json");
      outputSchema = avroFlattener.flatten(inputSchema, true);
    } catch (IOException e) {
      // Fail class initialization loudly if the fixture schema cannot be read.
      // (Fixed message typo: was "Could not initialized tests".)
      throw new RuntimeException("Could not initialize tests", e);
    }
  }
  /** Evolution enabled + destination table already exists: DDL/DML should include evolved columns. */
  @Test
  public void testEvolutionEnabledForExistingTable() throws IOException {
    boolean isEvolutionEnabled = true;
    Optional<Table> destinationTableMeta = createEvolvedDestinationTable(schemaName, "default", "", true);
    String ddl = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(outputSchema,
            schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
            null, isEvolutionEnabled, true, destinationTableMeta,
            new HashMap<String, String>());
    Assert.assertEquals(ddl, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.ddl"), "Generated DDL did not match expected for evolution enabled");
    String dml = HiveAvroORCQueryGenerator
        .generateTableMappingDML(inputSchema, outputSchema, schemaName, schemaName + "_orc", Optional.<String>absent(),
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(),
            Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);
    Assert.assertEquals(dml, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.dml"), "Generated DML did not match expected for evolution enabled");
  }
  /** Evolution enabled + no destination table: CREATE TABLE covers the full evolved schema. */
  @Test
  public void testEvolutionEnabledForNewTable() throws IOException {
    boolean isEvolutionEnabled = true;
    Optional<Table> destinationTableMeta = Optional.absent();
    String ddl = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(outputSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
            null, isEvolutionEnabled, true, destinationTableMeta,
            new HashMap<String, String>());
    Assert.assertEquals(ddl, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.ddl"),
        "Generated DDL did not match expected for evolution enabled");
    String dml = HiveAvroORCQueryGenerator
        .generateTableMappingDML(inputSchema, outputSchema, schemaName, schemaName + "_orc", Optional.<String>absent(),
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(),
            Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);
    Assert.assertEquals(dml, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.dml"),
        "Generated DML did not match expected for evolution enabled");
  }
  /** Evolution disabled + destination table exists: generated queries must honor the un-evolved destination schema. */
  @Test
  public void testEvolutionDisabledForExistingTable() throws IOException {
    boolean isEvolutionEnabled = false;
    boolean casePreserved = true;
    Optional<Table> destinationTableMeta = createEvolvedDestinationTable(schemaName, "default", "", true);
    String ddl = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(outputSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
            null, isEvolutionEnabled, casePreserved, destinationTableMeta,
            new HashMap<String, String>());
    Assert.assertEquals(ddl, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_disabled.ddl"),
        "Generated DDL did not match expected for evolution disabled");
    String dml = HiveAvroORCQueryGenerator
        .generateTableMappingDML(inputSchema, outputSchema, schemaName, schemaName + "_orc", Optional.<String>absent(),
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(),
            Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);
    Assert.assertEquals(dml, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_disabled.dml"),
        "Generated DML did not match expected for evolution disabled");
  }
  /**
   * Evolution disabled + no destination table: since the table is being created fresh, output is
   * expected to match the "evolution enabled" fixtures (evolution state is irrelevant for new tables).
   */
  @Test
  public void testEvolutionDisabledForNewTable() throws IOException {
    boolean isEvolutionEnabled = false;
    Optional<Table> destinationTableMeta = Optional.absent();
    String ddl = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(outputSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
            null, isEvolutionEnabled, true, destinationTableMeta,
            new HashMap<String, String>());
    Assert.assertEquals(ddl, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.ddl"),
        "Generated DDL did not match expected for evolution disabled");
    String dml = HiveAvroORCQueryGenerator
        .generateTableMappingDML(inputSchema, outputSchema, schemaName, schemaName + "_orc", Optional.<String>absent(),
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(),
            Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);
    Assert.assertEquals(dml, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_evolution_enabled.dml"),
        "Generated DML did not match expected for evolution disabled");
  }
  /** Destination table lacks lineage comments: generated queries should match the "lineage missing" fixtures. */
  @Test
  public void testLineageMissing() throws IOException {
    boolean isEvolutionEnabled = false;
    Optional<Table> destinationTableMeta = createEvolvedDestinationTable(schemaName, "default", "", false);
    String ddl = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(outputSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
            null, isEvolutionEnabled, true, destinationTableMeta,
            new HashMap<String, String>());
    Assert.assertEquals(ddl, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_lineage_missing.ddl"),
        "Generated DDL did not match expected for evolution disabled");
    String dml = HiveAvroORCQueryGenerator
        .generateTableMappingDML(inputSchema, outputSchema, schemaName, schemaName + "_orc", Optional.<String>absent(),
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(),
            Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);
    Assert.assertEquals(dml, ConversionHiveTestUtils.readQueryFromFile(resourceDir,
        "source_schema_lineage_missing.dml"),
        "Generated DML did not match expected for evolution disabled");
  }
  /** With evolution enabled, an existing un-evolved destination table should receive ALTER TABLE ADD COLUMNS DDL. */
  @Test
  public void testEvolutionEnabledGenerateEvolutionDDL() {
    String orcStagingTableName = schemaName + "_staging";
    String orcTableName = schemaName;
    boolean isEvolutionEnabled = true;
    Optional<Table> destinationTableMeta = createEvolvedDestinationTable(schemaName, "default", "", true);
    Map<String, String> hiveColumns = new HashMap<>();
    // Call to help populate hiveColumns via real code path
    HiveAvroORCQueryGenerator.generateCreateTableDDL(outputSchema, schemaName, "/tmp/dummy", Optional.<String>absent(),
        Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
        Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
        Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
        null, isEvolutionEnabled, true, destinationTableMeta, hiveColumns);
    // Destination table exists
    Properties tableProperties = new Properties();
    tableProperties.setProperty("random", "value");
    List<String> generateEvolutionDDL = HiveAvroORCQueryGenerator
        .generateEvolutionDDL(orcStagingTableName, orcTableName, Optional.of(hiveDbName), Optional.of(hiveDbName),
            outputSchema, isEvolutionEnabled, hiveColumns, destinationTableMeta, tableProperties);
    Assert.assertEquals(generateEvolutionDDL.size(), 4);
    Assert.assertEquals(generateEvolutionDDL.get(1),
        "ALTER TABLE `sourceSchema` ADD COLUMNS (`parentFieldRecord__nestedFieldInt` int "
            + "COMMENT 'from flatten_source parentFieldRecord.nestedFieldInt')",
        "Generated evolution DDL did not match for evolution enabled");
    // Destination table does not exists
    destinationTableMeta = Optional.absent();
    generateEvolutionDDL = HiveAvroORCQueryGenerator
        .generateEvolutionDDL(orcStagingTableName, orcTableName, Optional.of(hiveDbName), Optional.of(hiveDbName),
            outputSchema, isEvolutionEnabled, hiveColumns, destinationTableMeta, tableProperties);
    // No DDL should be generated, because create table will take care of destination table
    Assert.assertEquals(generateEvolutionDDL.size(), 0,
        "Generated evolution DDL did not match for evolution enabled");
  }
  /** With evolution disabled, no ALTER TABLE DDL is ever generated, whether or not the destination exists. */
  @Test
  public void testEvolutionDisabledGenerateEvolutionDDL() {
    String orcStagingTableName = schemaName + "_staging";
    String orcTableName = schemaName;
    boolean isEvolutionEnabled = false;
    Optional<Table> destinationTableMeta = createEvolvedDestinationTable(schemaName, "default", "", true);
    Map<String, String> hiveColumns = new HashMap<>();
    // Call to help populate hiveColumns via real code path
    HiveAvroORCQueryGenerator.generateCreateTableDDL(outputSchema, schemaName, "/tmp/dummy", Optional.<String>absent(),
        Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
        Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
        Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
        null, isEvolutionEnabled, true, destinationTableMeta, hiveColumns);
    // Destination table exists
    Properties tableProperties = new Properties();
    tableProperties.setProperty("random", "value");
    List<String> generateEvolutionDDL = HiveAvroORCQueryGenerator
        .generateEvolutionDDL(orcStagingTableName, orcTableName, Optional.of(hiveDbName), Optional.of(hiveDbName),
            outputSchema, isEvolutionEnabled, hiveColumns, destinationTableMeta, tableProperties);
    // No DDL should be generated, because select based on destination table will selectively project columns
    Assert.assertEquals(generateEvolutionDDL.size(), 0,
        "Generated evolution DDL did not match for evolution disabled");
    // Destination table does not exists
    destinationTableMeta = Optional.absent();
    generateEvolutionDDL = HiveAvroORCQueryGenerator
        .generateEvolutionDDL(orcStagingTableName, orcTableName, Optional.of(hiveDbName), Optional.of(hiveDbName),
            outputSchema, isEvolutionEnabled, hiveColumns, destinationTableMeta, tableProperties);
    // No DDL should be generated, because create table will take care of destination table
    Assert.assertEquals(generateEvolutionDDL.size(), 0,
        "Generated evolution DDL did not match for evolution disabled");
  }
  /**
   * Builds a destination table whose column set simulates an un-evolved schema: it omits
   * {@code parentFieldRecord__nestedFieldInt} (present in the flattened source schema) and
   * adds an extra column not present in the source.
   *
   * @param tableName   hive table name
   * @param dbName      hive database name
   * @param location    storage location for the table's StorageDescriptor
   * @param withComment whether to attach flatten-source lineage comments to the columns
   * @return the table wrapped in an {@link Optional}
   */
  private Optional<Table> createEvolvedDestinationTable(String tableName, String dbName, String location,
      boolean withComment) {
    List<FieldSchema> cols = new ArrayList<>();
    // Existing columns that match avroToOrcSchemaEvolutionTest/source_schema_evolution_enabled.ddl
    cols.add(new FieldSchema("parentFieldRecord__nestedFieldRecord__superNestedFieldString", "string",
        withComment ? "from flatten_source parentFieldRecord.nestedFieldRecord.superNestedFieldString" : ""));
    cols.add(new FieldSchema("parentFieldRecord__nestedFieldRecord__superNestedFieldInt", "int",
        withComment ? "from flatten_source parentFieldRecord.nestedFieldRecord.superNestedFieldInt" : ""));
    cols.add(new FieldSchema("parentFieldRecord__nestedFieldString", "string",
        withComment ? "from flatten_source parentFieldRecord.nestedFieldString" : ""));
    // The following column is skipped (simulating un-evolved schema):
    // Column name   : parentFieldRecord__nestedFieldInt
    // Column type   : int
    // Column comment: from flatten_source parentFieldRecord.nestedFieldInt
    cols.add(new FieldSchema("parentFieldInt", "int",
        withComment ? "from flatten_source parentFieldInt" : ""));
    // Extra schema
    cols.add(new FieldSchema("parentFieldRecord__nestedFieldString2", "string",
        withComment ? "from flatten_source parentFieldRecord.nestedFieldString2" : ""));
    String inputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
    String outputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
    StorageDescriptor storageDescriptor = new StorageDescriptor(cols, location, inputFormat, outputFormat, false, 0,
        new SerDeInfo(), null, Lists.<Order>newArrayList(), null);
    Table table = new Table(tableName, dbName, "ketl_dev", 0, 0, 0, storageDescriptor,
        Lists.<FieldSchema>newArrayList(), Maps.<String,String>newHashMap(), "", "", "");
    return Optional.of(table);
  }
}
| 2,321 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/converter/HiveAvroToOrcConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.converter;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.ConversionHiveTestUtils;
import org.apache.gobblin.data.management.conversion.hive.LocalHiveMetastoreTestUtils;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDatasetTest;
import org.apache.gobblin.data.management.conversion.hive.entities.QueryBasedHiveConversionEntity;
import org.apache.gobblin.data.management.conversion.hive.entities.SchemaAwareHivePartition;
import org.apache.gobblin.data.management.conversion.hive.entities.SchemaAwareHiveTable;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
@Test(groups = { "gobblin.data.management.conversion" })
public class HiveAvroToOrcConverterTest {
  private static final String TEST_TABLE = "testtable";
  private static String resourceDir = "hiveConverterTest";
  private LocalHiveMetastoreTestUtils hiveMetastoreTestUtils;
  private File tmpDir;
  public HiveAvroToOrcConverterTest() {
    this.tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
    this.hiveMetastoreTestUtils = LocalHiveMetastoreTestUtils.getInstance();
  }
  /***
   * Test flattened DDL and DML generation
   * @throws IOException
   */
  @Test
  public void testFlattenSchemaDDLandDML() throws Exception {
    String dbName = "testdb";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    Table table = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE, tableSdLoc, Optional.<String> absent());
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "recordWithinRecordWithinRecord_nested.json");
    WorkUnitState wus = ConversionHiveTestUtils.createWus(dbName, TEST_TABLE, 0);
    try (HiveAvroToFlattenedOrcConverter converter = new HiveAvroToFlattenedOrcConverter();) {
      Config config = ConfigFactory.parseMap(
          ImmutableMap.<String, String>builder().put("destinationFormats", "flattenedOrc")
              .put("flattenedOrc.destination.dbName", dbName)
              .put("flattenedOrc.destination.tableName", TEST_TABLE + "_orc")
              .put("flattenedOrc.destination.dataPath", "file:" + tableSdLoc + "_orc").build());
      ConvertibleHiveDataset cd = ConvertibleHiveDatasetTest.createTestConvertibleDataset(config);
      List<QueryBasedHiveConversionEntity> conversionEntities =
          Lists.newArrayList(converter.convertRecord(converter.convertSchema(schema, wus),
              new QueryBasedHiveConversionEntity(cd, new SchemaAwareHiveTable(table, schema)), wus));
      Assert.assertEquals(conversionEntities.size(), 1, "Only one query entity should be returned");
      QueryBasedHiveConversionEntity queryBasedHiveConversionEntity = conversionEntities.get(0);
      List<String> queries = queryBasedHiveConversionEntity.getQueries();
      Assert.assertEquals(queries.size(), 4, "4 DDL and one DML query should be returned");
      // Ignore the part before the first bracket in the DDL and before the SELECT clause in the
      // DML, because the staging table name has a random component.
      // Fix: StringUtils.substringAfter(str, separator) takes the subject string FIRST. The
      // original call had the arguments reversed (substringAfter("(", query)), so both actual
      // and expected reduced to "" and these assertions passed vacuously.
      String actualDDLQuery = StringUtils.substringAfter(queries.get(0).trim(), "(");
      // NOTE(review): assumes the DML is the last query produced by the converter — confirm
      // against HiveAvroORCQueryGenerator's query ordering.
      String actualDMLQuery = StringUtils.substringAfter(queries.get(queries.size() - 1).trim(), "SELECT");
      String expectedDDLQuery = StringUtils.substringAfter(
          ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_flattened.ddl"), "(");
      String expectedDMLQuery = StringUtils.substringAfter(
          ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_flattened.dml"), "SELECT");
      Assert.assertEquals(actualDDLQuery, expectedDDLQuery);
      Assert.assertEquals(actualDMLQuery, expectedDMLQuery);
    }
  }
  /***
   * Test nested DDL and DML generation
   * @throws IOException
   */
  @Test
  public void testNestedSchemaDDLandDML() throws Exception {
    String dbName = "testdb";
    String tableSdLoc = new File(this.tmpDir, TEST_TABLE).getAbsolutePath();
    this.hiveMetastoreTestUtils.getLocalMetastoreClient().dropDatabase(dbName, false, true, true);
    Table table = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, TEST_TABLE, tableSdLoc, Optional.<String> absent());
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "recordWithinRecordWithinRecord_nested.json");
    WorkUnitState wus = ConversionHiveTestUtils.createWus(dbName, TEST_TABLE, 0);
    wus.getJobState().setProp("orc.table.flatten.schema", "false");
    try (HiveAvroToNestedOrcConverter converter = new HiveAvroToNestedOrcConverter();) {
      Config config = ConfigFactory.parseMap(ImmutableMap.<String, String> builder()
          .put("destinationFormats", "nestedOrc")
          .put("nestedOrc.destination.tableName","testtable_orc_nested")
          .put("nestedOrc.destination.dbName",dbName)
          .put("nestedOrc.destination.dataPath","file:" + tableSdLoc + "_orc_nested")
          .build());
      ConvertibleHiveDataset cd = ConvertibleHiveDatasetTest.createTestConvertibleDataset(config);
      List<QueryBasedHiveConversionEntity> conversionEntities =
          Lists.newArrayList(converter.convertRecord(converter.convertSchema(schema, wus), new QueryBasedHiveConversionEntity(cd , new SchemaAwareHiveTable(table, schema)), wus));
      Assert.assertEquals(conversionEntities.size(), 1, "Only one query entity should be returned");
      QueryBasedHiveConversionEntity queryBasedHiveConversionEntity = conversionEntities.get(0);
      List<String> queries = queryBasedHiveConversionEntity.getQueries();
      Assert.assertEquals(queries.size(), 4, "4 DDL and one DML query should be returned");
      // Ignore the part before the first bracket in the DDL and before the SELECT clause in the
      // DML, because the staging table name has a random component.
      // Fix: same reversed-argument bug as in testFlattenSchemaDDLandDML — subject string must
      // come first in StringUtils.substringAfter.
      String actualDDLQuery = StringUtils.substringAfter(queries.get(0).trim(), "(");
      // NOTE(review): assumes the DML is the last query produced by the converter — confirm
      // against HiveAvroORCQueryGenerator's query ordering.
      String actualDMLQuery = StringUtils.substringAfter(queries.get(queries.size() - 1).trim(), "SELECT");
      String expectedDDLQuery = StringUtils.substringAfter(
          ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_nested.ddl"), "(");
      String expectedDMLQuery = StringUtils.substringAfter(
          ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_nested.dml"), "SELECT");
      Assert.assertEquals(actualDDLQuery, expectedDDLQuery);
      Assert.assertEquals(actualDMLQuery, expectedDMLQuery);
    }
  }
  /** Partitions listed in "gobblin.replaced.partitions" should be dropped, except the partition itself. */
  @Test
  public void dropReplacedPartitionsTest() throws Exception {
    Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
    table.setTableType("VIRTUAL_VIEW");
    table.setPartitionKeys(ImmutableList.of(new FieldSchema("year", "string", ""), new FieldSchema("month", "string", "")));
    Partition part = new Partition();
    part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));
    SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
    SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);
    QueryBasedHiveConversionEntity conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
    List<ImmutableMap<String, String>> expected =
        ImmutableList.of(ImmutableMap.of("year", "2015", "month", "12"), ImmutableMap.of("year", "2016", "month", "01"));
    Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
    // Make sure that a partition itself is not dropped
    Partition replacedSelf = new Partition();
    replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
    replacedSelf.setValues(ImmutableList.of("2016", "02"));
    conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
    Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
  }
  @Test
  /***
   * More comprehensive tests for WhiteBlackList are in: {@link org.apache.gobblin.data.management.copy.hive.WhitelistBlacklistTest}
   */
  public void hiveViewRegistrationWhiteBlackListTest() throws Exception {
    WorkUnitState wus = ConversionHiveTestUtils.createWus("dbName", "tableName", 0);
    Optional<WhitelistBlacklist> optionalWhitelistBlacklist = AbstractAvroToOrcConverter.getViewWhiteBackListFromWorkUnit(wus);
    Assert.assertTrue(!optionalWhitelistBlacklist.isPresent(),
        "No whitelist blacklist specified in WUS, WhiteListBlackList object should be absent");
    // Empty whitelist/blacklist: everything is accepted
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST, "");
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST, "");
    optionalWhitelistBlacklist = AbstractAvroToOrcConverter.getViewWhiteBackListFromWorkUnit(wus);
    Assert.assertTrue(optionalWhitelistBlacklist.isPresent(),
        "Whitelist blacklist specified in WUS, WhiteListBlackList object should be present");
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptDb("mydb"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptTable("mydb", "mytable"));
    // Db-level whitelist: only that db is accepted
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST, "yourdb");
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST, "");
    optionalWhitelistBlacklist = AbstractAvroToOrcConverter.getViewWhiteBackListFromWorkUnit(wus);
    Assert.assertTrue(optionalWhitelistBlacklist.isPresent(),
        "Whitelist blacklist specified in WUS, WhiteListBlackList object should be present");
    Assert.assertTrue(!optionalWhitelistBlacklist.get().acceptDb("mydb"));
    Assert.assertTrue(!optionalWhitelistBlacklist.get().acceptTable("mydb", "mytable"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptDb("yourdb"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptTable("yourdb", "mytable"));
    // Table-level whitelist: only that db.table is accepted
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST, "yourdb.yourtable");
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST, "");
    optionalWhitelistBlacklist = AbstractAvroToOrcConverter.getViewWhiteBackListFromWorkUnit(wus);
    Assert.assertTrue(optionalWhitelistBlacklist.isPresent(),
        "Whitelist blacklist specified in WUS, WhiteListBlackList object should be present");
    Assert.assertTrue(!optionalWhitelistBlacklist.get().acceptDb("mydb"));
    Assert.assertTrue(!optionalWhitelistBlacklist.get().acceptTable("yourdb", "mytable"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptDb("yourdb"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptTable("yourdb", "yourtable"));
    // Table-level blacklist: everything but that db.table is accepted
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_WHITELIST, "");
    wus.setProp(AbstractAvroToOrcConverter.HIVE_CONVERSION_VIEW_REGISTRATION_BLACKLIST, "yourdb.yourtable");
    optionalWhitelistBlacklist = AbstractAvroToOrcConverter.getViewWhiteBackListFromWorkUnit(wus);
    Assert.assertTrue(optionalWhitelistBlacklist.isPresent(),
        "Whitelist blacklist specified in WUS, WhiteListBlackList object should be present");
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptDb("mydb"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptTable("yourdb", "mytable"));
    Assert.assertTrue(optionalWhitelistBlacklist.get().acceptDb("yourdb"));
    Assert.assertTrue(!optionalWhitelistBlacklist.get().acceptTable("yourdb", "yourtable"));
  }
}
| 2,322 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/util/HiveAvroORCQueryGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.util;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.ConversionHiveTestUtils;
import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator;
import org.apache.gobblin.util.AvroFlattener;
import static org.apache.gobblin.data.management.conversion.hive.utils.AvroHiveTypeUtils.generateAvroToHiveColumnMapping;
@Test(groups = { "gobblin.data.management.conversion" })
public class HiveAvroORCQueryGeneratorTest {
private static String resourceDir = "avroToOrcQueryUtilsTest";
private static Optional<Table> destinationTableMeta = Optional.absent();
private static boolean isEvolutionEnabled = true;
private static Optional<Integer> rowLimit = Optional.absent();
/**
* Testing DDL generation for schema containing logical types.
* DDL comparison doesn't include any spacing and blank.
* @throws Exception
*/
// Fix: explicit @Test added for consistency with sibling test methods (previously this method
// was only picked up via the class-level @Test(groups=...) annotation).
@Test
public void testLogicalTypeResolutionWithDDL() throws Exception {
  String schemaName = "schemaWithLogicalFieldDDL";
  Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "schemaWithLogicalField.json");
  String q = HiveAvroORCQueryGenerator
      .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName,
          Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
          Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
          Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
          null, isEvolutionEnabled, true, destinationTableMeta,
          new HashMap<String, String>());
  /**
   * This unit has a known flaw: Due to the fact that hive-1.0.1 does not support "Date" as the logical type,
   * the "date" type is not being recognized by Hive's library when translating Avro schema to
   * TypeInfo( An TypeDescription equivalent). Therefore in schemaWithLogicalField.ddl, for the `nestedLogicalFieldDate`
   * value in `columns.types` as part of tableProperties, we will use "int" --- the physical type of date instead of "date"
   */
  // Whitespace is stripped from both sides before comparing, so the check is layout-insensitive.
  Assert.assertEquals(q.trim().replaceAll("\\s+",""),
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "schemaWithLogicalField.ddl").trim().replaceAll("\\s+",""));
}
/***
* Test DDL generation for schema structured as: Array within record within array within record
* @throws IOException
*/
@Test
public void testArrayWithinRecordWithinArrayWithinRecordDDL() throws IOException {
  // DDL generation for an array nested inside a record inside another array inside a record.
  String schemaName = "testArrayWithinRecordWithinArrayWithinRecordDDL";
  Schema avroSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "arrayWithinRecordWithinArrayWithinRecord_nested.json");
  String generatedDdl = HiveAvroORCQueryGenerator.generateCreateTableDDL(avroSchema, schemaName,
      "file:/user/hive/warehouse/" + schemaName,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
      null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());
  String expectedDdl =
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "arrayWithinRecordWithinArrayWithinRecord_nested.ddl");
  Assert.assertEquals(generatedDdl, expectedDdl);
}
/***
* Test DDL generation for schema structured as: option within option within record
* @throws IOException
*/
@Test
public void testOptionWithinOptionWithinRecordDDL() throws IOException {
  // DDL generation for an option (nullable union) nested inside another option inside a record.
  String schemaName = "testOptionWithinOptionWithinRecordDDL";
  Schema avroSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "optionWithinOptionWithinRecord_nested.json");
  String generatedDdl = HiveAvroORCQueryGenerator.generateCreateTableDDL(avroSchema, schemaName,
      "file:/user/hive/warehouse/" + schemaName,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(),
      null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());
  String expectedDdl =
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "optionWithinOptionWithinRecord_nested.ddl");
  Assert.assertEquals(generatedDdl, expectedDdl);
}
/***
* Test DDL generation for schema structured as: record within option within record
* @throws IOException
*/
@Test
public void testRecordWithinOptionWithinRecordDDL() throws IOException {
  String schemaName = "testRecordWithinOptionWithinRecordDDL";
  String tableLocation = "file:/user/hive/warehouse/" + schemaName;
  Schema avroSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "recordWithinOptionWithinRecord_nested.json");

  // Generate the CREATE TABLE DDL with every optional knob left unset
  String ddl = HiveAvroORCQueryGenerator.generateCreateTableDDL(avroSchema, schemaName, tableLocation,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled,
      true, destinationTableMeta, new HashMap<String, String>());

  // Trailing whitespace is not significant for this golden-file comparison
  Assert.assertEquals(ddl.trim(),
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinOptionWithinRecord_nested.ddl"));
}
/***
* Test DDL generation for schema structured as: record within record within record
* @throws IOException
*/
@Test
public void testRecordWithinRecordWithinRecordDDL() throws IOException {
  String schemaName = "testRecordWithinRecordWithinRecordDDL";
  String tableLocation = "file:/user/hive/warehouse/" + schemaName;
  Schema avroSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "recordWithinRecordWithinRecord_nested.json");

  // Generate the CREATE TABLE DDL with every optional knob left unset
  String ddl = HiveAvroORCQueryGenerator.generateCreateTableDDL(avroSchema, schemaName, tableLocation,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled,
      true, destinationTableMeta, new HashMap<String, String>());

  // Trailing whitespace is not significant for this golden-file comparison
  Assert.assertEquals(ddl.trim(),
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_nested.ddl"));
}
/***
* Test DDL generation for schema structured as: record within record within record after flattening
* @throws IOException
*/
@Test
public void testRecordWithinRecordWithinRecordFlattenedDDL() throws IOException {
  // NOTE: schemaName deliberately matches the nested-DDL test; the golden file was generated with it
  String schemaName = "testRecordWithinRecordWithinRecordDDL";
  String tableLocation = "file:/user/hive/warehouse/" + schemaName;
  Schema nestedSchema =
      ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "recordWithinRecordWithinRecord_nested.json");

  // Flatten the nested schema before generating the DDL
  Schema flattenedSchema = new AvroFlattener().flatten(nestedSchema, true);

  String ddl = HiveAvroORCQueryGenerator.generateCreateTableDDL(flattenedSchema, schemaName, tableLocation,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled,
      true, destinationTableMeta, new HashMap<String, String>());

  Assert.assertEquals(ddl,
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_flattened.ddl"));
}
/***
* Test DML generation
* @throws IOException
*/
@Test
public void testRecordWithinRecordWithinRecordFlattenedDML() throws IOException {
  // NOTE: schemaName deliberately matches the nested-DDL test; the golden file was generated with it
  String schemaName = "testRecordWithinRecordWithinRecordDDL";
  Schema nestedSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "recordWithinRecordWithinRecord_nested.json");

  // DML maps the nested (source) schema onto its flattened (destination) counterpart
  Schema flattenedSchema = new AvroFlattener().flatten(nestedSchema, true);

  String dml = HiveAvroORCQueryGenerator.generateTableMappingDML(nestedSchema, flattenedSchema, schemaName,
      schemaName + "_orc", Optional.<String>absent(), Optional.<String>absent(),
      Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(), Optional.<Boolean>absent(),
      isEvolutionEnabled, destinationTableMeta, rowLimit);

  Assert.assertEquals(dml.trim(),
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord.dml"));
}
/***
* Test Multi-partition DDL generation
* @throws IOException
*/
@Test
public void testMultiPartitionDDL() throws IOException {
  String schemaName = "testMultiPartitionDDL";
  String tableLocation = "file:/user/hive/warehouse/" + schemaName;
  Schema nestedSchema =
      ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "recordWithinRecordWithinRecord_nested.json");
  Schema flattenedSchema = new AvroFlattener().flatten(nestedSchema, true);

  // Three partition columns: name -> Hive type
  Map<String, String> partitionDDLInfo = ImmutableMap.of("datepartition", "string", "id", "int", "country", "string");

  String ddl = HiveAvroORCQueryGenerator.generateCreateTableDDL(flattenedSchema, schemaName, tableLocation,
      Optional.<String>absent(), Optional.of(partitionDDLInfo), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled,
      true, destinationTableMeta, new HashMap<String, String>());

  Assert.assertEquals(ddl,
      ConversionHiveTestUtils.readQueryFromFile(resourceDir, "testMultiPartition.ddl"));
}
/***
* Test Multi-partition DML generation
* @throws IOException
*/
@Test
public void testMultiPartitionDML() throws IOException {
  String schemaName = "testMultiPartitionDML";
  Schema nestedSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "recordWithinRecordWithinRecord_nested.json");
  Schema flattenedSchema = new AvroFlattener().flatten(nestedSchema, true);

  // Concrete partition values for the three partition columns
  Map<String, String> partitionDMLInfo = ImmutableMap.of("datepartition", "2016-01-01", "id", "101", "country", "US");

  String dml = HiveAvroORCQueryGenerator.generateTableMappingDML(nestedSchema, flattenedSchema, schemaName,
      schemaName + "_orc", Optional.<String>absent(), Optional.<String>absent(), Optional.of(partitionDMLInfo),
      Optional.<Boolean>absent(), Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta, rowLimit);

  Assert.assertEquals(dml.trim(), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "testMultiPartition.dml"));
}
/***
* Test bad schema
* @throws IOException
*/
@Test(expectedExceptions = IllegalArgumentException.class)
public void testNonRecordRootSchemaDDL() throws Exception {
  String schemaName = "nonRecordRootSchema";
  // A bare STRING at the schema root is not a record, so DDL generation must reject it
  Schema nonRecordSchema = Schema.create(Schema.Type.STRING);
  HiveAvroORCQueryGenerator.generateCreateTableDDL(nonRecordSchema, schemaName,
      "file:/user/hive/warehouse/" + schemaName,
      Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
      Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
      Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled,
      true, destinationTableMeta, new HashMap<String, String>());
}
/***
* Test DML generation with row limit
* @throws IOException
*/
@Test
public void testFlattenedDMLWithRowLimit() throws IOException {
  // NOTE: schemaName deliberately matches the nested-DDL test; the golden file was generated with it
  String schemaName = "testRecordWithinRecordWithinRecordDDL";
  Schema nestedSchema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
      "recordWithinRecordWithinRecord_nested.json");
  Schema flattenedSchema = new AvroFlattener().flatten(nestedSchema, true);

  // Local row limit intentionally shadows the field so only this DML carries "LIMIT 1"
  Optional<Integer> rowLimit = Optional.of(1);

  String dml = HiveAvroORCQueryGenerator.generateTableMappingDML(nestedSchema, flattenedSchema, schemaName,
      schemaName + "_orc", Optional.<String>absent(), Optional.<String>absent(),
      Optional.<Map<String, String>>absent(), Optional.<Boolean>absent(), Optional.<Boolean>absent(),
      isEvolutionEnabled, destinationTableMeta, rowLimit);

  Assert.assertEquals(dml.trim(), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "flattenedWithRowLimit.dml"));
}
/**
 * Verifies DROP PARTITION DDL generation for both the multi-partition-spec and
 * single-partition-spec overloads of {@code generateDropPartitionsDDL}.
 */
@Test
public void testDropPartitions() throws Exception {
  // Test multiple partition-spec drop method: three specs collapse into one ALTER TABLE statement
  List<Map<String, String>> partitionDMLInfos = Lists.newArrayList();
  partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10"));
  partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-02", "sizepartition", "20"));
  partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-03", "sizepartition", "30"));

  List<String> ddl = HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1", partitionDMLInfos);
  // Exactly two statements: a USE, then a single combined ALTER TABLE ... DROP
  Assert.assertEquals(ddl.size(), 2);
  // NOTE(review): the multi-spec overload emits "USE db1 \n" (trailing space) while the
  // single-spec overload below emits "USE db1\n" — presumably an inconsistency in the
  // generator worth confirming/fixing there.
  Assert.assertEquals(ddl.get(0), "USE db1 \n");
  Assert.assertEquals(ddl.get(1),
      "ALTER TABLE table1 DROP IF EXISTS PARTITION (datepartition='2016-01-01',sizepartition='10'), "
          + "PARTITION (datepartition='2016-01-02',sizepartition='20'), "
          + "PARTITION (datepartition='2016-01-03',sizepartition='30')");

  // Check empty partitions: no partition specs means no DDL at all
  Assert.assertEquals(HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1",
      Collections.<Map<String, String>>emptyList()), Collections.emptyList());

  // Test single partition-spec drop method: note column names are backtick-quoted here
  Map<String, String> partitionsDMLInfo = ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10");
  ddl = HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1", partitionsDMLInfo);

  Assert.assertEquals(ddl.size(), 2);
  Assert.assertEquals(ddl.get(0), "USE db1\n");
  Assert.assertEquals(ddl.get(1),
      "ALTER TABLE table1 DROP IF EXISTS PARTITION (`datepartition`='2016-01-01', `sizepartition`='10') ");
}
@Test
public void testCreatePartitionDDL() throws Exception {
  Map<String, String> partitionSpec = ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10");
  List<String> statements =
      HiveAvroORCQueryGenerator.generateCreatePartitionDDL("db1", "table1", "/tmp", partitionSpec);

  // Expect a USE statement followed by a single ADD PARTITION with an explicit LOCATION
  Assert.assertEquals(statements.size(), 2);
  Assert.assertEquals(statements.get(0), "USE db1\n");
  Assert.assertEquals(statements.get(1),
      "ALTER TABLE `table1` ADD IF NOT EXISTS PARTITION (`datepartition`='2016-01-01', `sizepartition`='10') \n"
          + " LOCATION '/tmp' ");
}
@Test
public void testDropTableDDL() throws Exception {
  // Both the db and table name must be backtick-quoted in the generated statement
  String actual = HiveAvroORCQueryGenerator.generateDropTableDDL("db1", "table1");
  Assert.assertEquals(actual, "DROP TABLE IF EXISTS `db1`.`table1`");
}
/**
 * Verifies Avro-to-Hive type mapping for record, union-with-null, array and map schemas.
 * Uses the TestNG {@link Assert} consistently (the original mixed in {@code org.junit.Assert}
 * with arguments in TestNG's (actual, expected) order, which would have produced swapped
 * failure messages under JUnit's (expected, actual) convention).
 */
@Test
public void testAvroToHiveTypeMapping() throws Exception {
  // test for record, this record-schema will be reused in the tests afterwards.
  Schema record_1 =
      Schema.createRecord("record_1", "", "", false, ImmutableList.<Schema.Field>of(
          new Schema.Field("a", Schema.create(Schema.Type.LONG), "", null),
          new Schema.Field("b", Schema.create(Schema.Type.BOOLEAN), "", null)
      ));
  String hiveSchema_1 = generateAvroToHiveColumnMapping(record_1, Optional.absent(), false, "");
  // the backtick was added on purpose to avoid preserved keywords appearing as part of column name
  String expectedHiveSchema_1 = "struct<`a`:bigint,`b`:boolean>";
  Assert.assertEquals(hiveSchema_1, expectedHiveSchema_1);

  // test for union (fake union, actually represents default value)
  Schema union_1 = Schema.createUnion(Schema.create(Schema.Type.NULL), record_1);
  String hiveSchema_2 = generateAvroToHiveColumnMapping(union_1, Optional.absent(), false, "");
  String expectedHiveSchema_2 = "struct<`a`:bigint,`b`:boolean>";
  Assert.assertEquals(hiveSchema_2, expectedHiveSchema_2);

  // test for array
  Schema array_1 = Schema.createArray(record_1);
  String hiveSchema_3 = generateAvroToHiveColumnMapping(array_1, Optional.absent(), false, "");
  String expectedHiveSchema_3 = "array<struct<`a`:bigint,`b`:boolean>>";
  Assert.assertEquals(hiveSchema_3, expectedHiveSchema_3);

  // test for map (map keys are always Hive strings)
  Schema map_1 = Schema.createMap(array_1);
  String hiveSchema_4 = generateAvroToHiveColumnMapping(map_1, Optional.absent(), false, "");
  String expectedHiveSchema_4 = "map<string,array<struct<`a`:bigint,`b`:boolean>>>";
  Assert.assertEquals(hiveSchema_4, expectedHiveSchema_4);
}
/**
 * Verifies that {@code escapeHiveType} backtick-quotes every field name in a deeply
 * nested Hive type string while leaving the type keywords and structure untouched.
 */
@Test
public void testHiveTypeEscaping() throws Exception {
  // Raw (unescaped) deeply-nested Hive type string
  String type = "array<struct<singleItems:array<struct<scoredEntity:struct<id:string,score:float,"
      + "sourceName:string,sourceModel:string>,scores:struct<fprScore:double,fprUtility:double,"
      + "calibratedFprUtility:double,sprScore:double,adjustedSprScore:double,sprUtility:double>,"
      + "sponsoredFlag:string,blendingRequestId:string,forExploration:boolean,d2Resource:string,"
      + "restliFinder:string,trackingId:binary,aggregation:struct<positionInAggregation:struct<index:int>,"
      + "typeOfAggregation:string>,decoratedFeedUpdateData:struct<avoData:struct<actorUrn:string,verbType:"
      + "string,objectUrn:string,objectType:string>,attributedActivityUrn:string,createdTime:bigint,totalLikes:"
      + "bigint,totalComments:bigint,rootActivity:struct<activityUrn:string,avoData:struct<actorUrn:string,"
      + "verbType:string,objectUrn:string,objectType:string>>>>>,scores:struct<fprScore:double,fprUtility:double,"
      + "calibratedFprUtility:double,sprScore:double,adjustedSprScore:double,sprUtility:double>,position:int>>";
  // Same type with each field name wrapped in backticks (primitive type names stay bare)
  String expectedEscapedType = "array<struct<`singleItems`:array<struct<`scoredEntity`:struct<`id`:string,"
      + "`score`:float,`sourceName`:string,`sourceModel`:string>,`scores`:struct<`fprScore`:double,"
      + "`fprUtility`:double,`calibratedFprUtility`:double,`sprScore`:double,`adjustedSprScore`:double,"
      + "`sprUtility`:double>,`sponsoredFlag`:string,`blendingRequestId`:string,`forExploration`:boolean,"
      + "`d2Resource`:string,`restliFinder`:string,`trackingId`:binary,`aggregation`:struct<`positionInAggregation`"
      + ":struct<`index`:int>,`typeOfAggregation`:string>,`decoratedFeedUpdateData`:struct<`avoData`:"
      + "struct<`actorUrn`:string,`verbType`:string,`objectUrn`:string,`objectType`:string>,`attributedActivityUrn`"
      + ":string,`createdTime`:bigint,`totalLikes`:bigint,`totalComments`:bigint,`rootActivity`:struct<`activityUrn`"
      + ":string,`avoData`:struct<`actorUrn`:string,`verbType`:string,`objectUrn`:string,`objectType`:string>>>>>,"
      + "`scores`:struct<`fprScore`:double,`fprUtility`:double,`calibratedFprUtility`:double,`sprScore`:double,"
      + "`adjustedSprScore`:double,`sprUtility`:double>,`position`:int>>";
  String actualEscapedType = HiveAvroORCQueryGenerator.escapeHiveType(type);
  Assert.assertEquals(actualEscapedType, expectedEscapedType);
}
@Test
public void testValidTypeEvolution() throws Exception {
  // Each pair is (evolved type, original type) expected to be a legal widening
  String[][] validWidenings = {
      {"float", "int"},
      {"double", "float"},
      {"string", "varchar"},
      {"double", "string"},
  };
  for (String[] pair : validWidenings) {
    Assert.assertTrue(HiveAvroORCQueryGenerator.isTypeEvolved(pair[0], pair[1]),
        "Expected " + pair[1] + " -> " + pair[0] + " to be a valid type evolution");
  }
  // Identical types do not count as "evolved"
  Assert.assertFalse(HiveAvroORCQueryGenerator.isTypeEvolved("int", "int"));
}
/**
 * Verifies that an incompatible type change (int -> boolean) is rejected
 * with a {@link RuntimeException} rather than silently accepted.
 */
@Test (expectedExceptions = RuntimeException.class)
public void testInvalidTypeEvolution() throws Exception {
  // Check for in-compatible types
  HiveAvroORCQueryGenerator.isTypeEvolved("boolean", "int");
}
@Test
public void testCreateOrUpdateViewDDL() throws Exception {
  String expectedCreate = "CREATE VIEW IF NOT EXISTS `db2`.`view1` AS SELECT * FROM `db1`.`tbl1`";

  // With update enabled, both a CREATE and an ALTER statement must be generated
  List<String> ddls = HiveAvroORCQueryGenerator.generateCreateOrUpdateViewDDL("db1", "tbl1", "db2", "view1", true);
  Assert.assertEquals(ddls.size(), 2, "Two queries for Create and Update should have been generated");
  Assert.assertEquals(ddls.get(0), expectedCreate);
  Assert.assertEquals(ddls.get(1), "ALTER VIEW `db2`.`view1` AS SELECT * FROM `db1`.`tbl1`");

  // With update disabled, only the CREATE statement is generated
  ddls = HiveAvroORCQueryGenerator.generateCreateOrUpdateViewDDL("db1", "tbl1", "db2", "view1", false);
  Assert.assertEquals(ddls.size(), 1, "One query for Create only should have been generated");
  Assert.assertEquals(ddls.get(0), expectedCreate);
}
}
| 2,323 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/dataset/ConvertibleHiveDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.dataset;
import com.google.common.base.Optional;
import java.io.InputStream;
import java.lang.reflect.Type;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.publisher.HiveConvertPublisher;
import org.apache.gobblin.data.management.conversion.hive.source.HiveAvroToOrcSource;
import org.apache.gobblin.data.management.conversion.hive.source.HiveWorkUnit;
import org.apache.gobblin.data.management.conversion.hive.utils.LineageUtils;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.dataset.HiveToHdfsDatasetResolver;
import org.apache.gobblin.dataset.HiveToHdfsDatasetResolverFactory;
import org.apache.gobblin.metrics.event.lineage.LineageEventBuilder;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.runtime.TaskState;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset.ConversionConfig;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
import static org.mockito.Mockito.when;
@Test(groups = { "gobblin.data.management.conversion" })
public class ConvertibleHiveDatasetTest {

  /** Lineage info ser/de */
  private static final Type DESCRIPTOR_LIST_TYPE = new TypeToken<ArrayList<Descriptor>>(){}.getType();
  private static final Gson GSON =
      new GsonBuilder().registerTypeAdapterFactory(new GsonInterfaceAdapter(Descriptor.class)).create();

  /**
   * Test if lineage information is properly set in the workunit for convertible hive datasets
   */
  @Test
  public void testLineageInfo() throws Exception {
    String testConfFilePath = "convertibleHiveDatasetTest/flattenedAndNestedOrc.conf";
    Config config = ConfigFactory.parseResources(testConfFilePath).getConfig("hive.conversion.avro");
    // Set datasetResolverFactory to convert Hive Lineage event to Hdfs Lineage event
    ConvertibleHiveDataset testConvertibleDataset = createTestConvertibleDataset(config);
    HiveWorkUnit workUnit = new HiveWorkUnit(testConvertibleDataset);
    workUnit.setProp("gobblin.broker.lineageInfo.datasetResolverFactory",
        HiveToHdfsDatasetResolverFactory.class.getName());
    workUnit.setProp(ConfigurationKeys.JOB_ID_KEY, "123456");
    Optional<LineageInfo> lineageInfo = LineageInfo.getLineageInfo(getSharedJobBroker(workUnit.getProperties()));
    HiveAvroToOrcSource src = new HiveAvroToOrcSource();

    Assert.assertTrue(LineageUtils.shouldSetLineageInfo(workUnit));
    if (LineageUtils.shouldSetLineageInfo(workUnit)) {
      src.setSourceLineageInfo(workUnit,
          lineageInfo);
    }
    // TaskState is passed to the publisher, hence test should mimic the same behavior
    TaskState taskState = new TaskState(new WorkUnitState(workUnit));
    if (LineageUtils.shouldSetLineageInfo(taskState)) {
      HiveConvertPublisher.setDestLineageInfo(taskState, lineageInfo);
    }
    Properties props = taskState.getProperties();

    // Assert that there are two eventBuilders for nestedOrc and flattenedOrc
    Collection<LineageEventBuilder> lineageEventBuilders = LineageInfo.load(Collections.singleton(taskState));
    Assert.assertEquals(lineageEventBuilders.size(), 2);

    // Assert that lineage name is correct
    Assert.assertEquals(props.getProperty("gobblin.event.lineage.name"), "/tmp/test");

    // Assert that source is correct for lineage event
    Assert.assertTrue(props.containsKey("gobblin.event.lineage.source"));
    DatasetDescriptor sourceDD =
        GSON.fromJson(props.getProperty("gobblin.event.lineage.source"), DatasetDescriptor.class);
    Assert.assertEquals(sourceDD.getPlatform(), "file");
    Assert.assertEquals(sourceDD.getName(), "/tmp/test");
    Assert.assertEquals(sourceDD.getMetadata().get(HiveToHdfsDatasetResolver.HIVE_TABLE), "db1.tb1");

    // Assert that first dest is correct for lineage event
    Assert.assertTrue(props.containsKey("gobblin.event.lineage.branch.1.destination"));
    DatasetDescriptor destDD1 =
        (DatasetDescriptor) firstDescriptor(props, "gobblin.event.lineage.branch.1.destination");
    Assert.assertEquals(destDD1.getPlatform(), "file");
    Assert.assertEquals(destDD1.getName(), "/tmp/data_nestedOrc/db1/tb1/final");
    Assert.assertEquals(destDD1.getMetadata().get(HiveToHdfsDatasetResolver.HIVE_TABLE),
        "db1_nestedOrcDb.tb1_nestedOrc");

    // Assert that second dest is correct for lineage event
    Assert.assertTrue(props.containsKey("gobblin.event.lineage.branch.2.destination"));
    DatasetDescriptor destDD2 =
        (DatasetDescriptor) firstDescriptor(props, "gobblin.event.lineage.branch.2.destination");
    Assert.assertEquals(destDD2.getPlatform(), "file");
    Assert.assertEquals(destDD2.getName(), "/tmp/data_flattenedOrc/db1/tb1/final");
    Assert.assertEquals(destDD2.getMetadata().get(HiveToHdfsDatasetResolver.HIVE_TABLE),
        "db1_flattenedOrcDb.tb1_flattenedOrc");
  }

  /** Deserializes the descriptor list stored under {@code destinationKey} and returns its first element. */
  private Descriptor firstDescriptor(Properties prop, String destinationKey) {
    List<Descriptor> descriptors = GSON.fromJson(prop.getProperty(destinationKey), DESCRIPTOR_LIST_TYPE);
    return descriptors.get(0);
  }

  @Test
  public void testFlattenedOrcConfig() throws Exception {
    String testConfFilePath = "convertibleHiveDatasetTest/flattenedOrc.conf";
    Config config = ConfigFactory.parseResources(testConfFilePath).getConfig("hive.conversion.avro");
    ConvertibleHiveDataset cd = createTestConvertibleDataset(config);

    Assert.assertEquals(cd.getDestFormats(), ImmutableSet.of("flattenedOrc"));
    Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
    validateFlattenedConfig(cd.getConversionConfigForFormat("flattenedOrc").get());
  }

  @Test
  public void testFlattenedAndNestedOrcConfig() throws Exception {
    String testConfFilePath = "convertibleHiveDatasetTest/flattenedAndNestedOrc.conf";
    Config config = ConfigFactory.parseResources(testConfFilePath).getConfig("hive.conversion.avro");
    ConvertibleHiveDataset cd = createTestConvertibleDataset(config);

    Assert.assertEquals(cd.getDestFormats(), ImmutableSet.of("flattenedOrc", "nestedOrc"));
    Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
    Assert.assertTrue(cd.getConversionConfigForFormat("nestedOrc").isPresent());

    validateFlattenedConfig(cd.getConversionConfigForFormat("flattenedOrc").get());
    validateNestedOrc(cd.getConversionConfigForFormat("nestedOrc").get());
  }

  @Test
  public void testFlattenedAndNestedOrcProps() throws Exception {
    String testConfFilePath = "convertibleHiveDatasetTest/flattenedAndNestedOrc.properties";

    Properties jobProps = new Properties();
    try (final InputStream stream = ConvertibleHiveDatasetTest.class.getClassLoader().getResourceAsStream(testConfFilePath)) {
      jobProps.load(stream);
    }

    Config config = ConfigUtils.propertiesToConfig(jobProps).getConfig("hive.conversion.avro");
    ConvertibleHiveDataset cd = createTestConvertibleDataset(config);

    Assert.assertEquals(cd.getDestFormats(), ImmutableSet.of("flattenedOrc", "nestedOrc"));
    Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
    Assert.assertTrue(cd.getConversionConfigForFormat("nestedOrc").isPresent());

    validateFlattenedConfig(cd.getConversionConfigForFormat("flattenedOrc").get());
    validateNestedOrc(cd.getConversionConfigForFormat("nestedOrc").get());
  }

  @Test
  public void testInvalidFormat()
      throws Exception {
    Config config = ConfigFactory.parseMap(ImmutableMap.<String, String>of("destinationFormats", "flattenedOrc,nestedOrc"));
    ConvertibleHiveDataset cd = createTestConvertibleDataset(config);
    // An unknown format name must simply yield an absent config
    Assert.assertFalse(cd.getConversionConfigForFormat("invalidFormat").isPresent());
  }

  @Test
  public void testDisableFormat()
      throws Exception {
    // nestedOrc is configured but deliberately absent from destinationFormats
    Config config = ConfigFactory.parseMap(ImmutableMap.<String, String> builder()
        .put("destinationFormats", "flattenedOrc")
        .put("flattenedOrc.destination.tableName","d")
        .put("flattenedOrc.destination.dbName","d")
        .put("flattenedOrc.destination.dataPath","d")
        .put("nestedOrc.destination.tableName","d")
        .put("nestedOrc.destination.dbName","d")
        .put("nestedOrc.destination.dataPath","d")
        .build());
    ConvertibleHiveDataset cd = createTestConvertibleDataset(config);
    Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
    Assert.assertFalse(cd.getConversionConfigForFormat("nestedOrc").isPresent());
  }

  /** Asserts the expected flattenedOrc conversion config loaded from the test resources. */
  private void validateFlattenedConfig(ConversionConfig conversionConfig) {
    Assert.assertEquals(conversionConfig.getDestinationDbName(), "db1_flattenedOrcDb");
    Assert.assertEquals(conversionConfig.getDestinationTableName(), "tb1_flattenedOrc");
    Assert.assertEquals(conversionConfig.getDestinationDataPath(), "/tmp/data_flattenedOrc/db1/tb1");

    Assert.assertEquals(conversionConfig.getClusterBy(), ImmutableList.of("c1", "c2"));
    Assert.assertEquals(conversionConfig.getNumBuckets().get(), Integer.valueOf(4));

    Properties hiveProps = new Properties();
    hiveProps.setProperty("mapred.map.tasks", "10,12");
    hiveProps.setProperty("hive.merge.mapfiles", "false");
    Assert.assertEquals(conversionConfig.getHiveRuntimeProperties(), hiveProps);
  }

  /** Asserts the expected nestedOrc conversion config loaded from the test resources. */
  private void validateNestedOrc(ConversionConfig conversionConfig) {
    Assert.assertEquals(conversionConfig.getDestinationDbName(), "db1_nestedOrcDb");
    Assert.assertEquals(conversionConfig.getDestinationTableName(), "tb1_nestedOrc");
    Assert.assertEquals(conversionConfig.getDestinationViewName().get(), "tb1_view");
    Assert.assertEquals(conversionConfig.getDestinationDataPath(), "/tmp/data_nestedOrc/db1/tb1");
    Assert.assertEquals(conversionConfig.isUpdateViewAlwaysEnabled(), false);

    Assert.assertEquals(conversionConfig.getClusterBy(), ImmutableList.of("c3", "c4"));
    Assert.assertEquals(conversionConfig.getNumBuckets().get(), Integer.valueOf(5));

    // FIX: dropped a redundant second `hiveProps = new Properties()` reassignment
    Properties hiveProps = new Properties();
    hiveProps.setProperty("mapred.map.tasks", "12");
    Assert.assertEquals(conversionConfig.getHiveRuntimeProperties(), hiveProps);
  }

  /** Builds a ConvertibleHiveDataset over a mocked FileSystem and metastore pool for db1.tb1. */
  public static ConvertibleHiveDataset createTestConvertibleDataset(Config config)
      throws URISyntaxException {
    Table table = getTestTable("db1", "tb1");
    FileSystem mockFs = Mockito.mock(FileSystem.class);
    when(mockFs.getUri()).thenReturn(new URI("test"));
    ConvertibleHiveDataset cd =
        new ConvertibleHiveDataset(mockFs, Mockito.mock(HiveMetastoreClientPool.class), new org.apache.hadoop.hive.ql.metadata.Table(
            table), new Properties(), config);
    return cd;
  }

  /** Creates a bare external Hive metastore Table with location /tmp/test. */
  public static Table getTestTable(String dbName, String tableName) {
    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);
    table.setTableType(TableType.EXTERNAL_TABLE.name());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation("/tmp/test");
    table.setSd(sd);
    return table;
  }

  /** Creates a job-scoped broker (child of a fresh top-level broker) from the given properties. */
  public static SharedResourcesBroker<GobblinScopeTypes> getSharedJobBroker(Properties props) {
    SharedResourcesBroker<GobblinScopeTypes> instanceBroker = SharedResourcesBrokerFactory
        .createDefaultTopLevelBroker(ConfigFactory.parseProperties(props), GobblinScopeTypes.GLOBAL.defaultScopeInstance());
    SharedResourcesBroker<GobblinScopeTypes> jobBroker = instanceBroker
        .newSubscopedBuilder(new JobScopeInstance("ConvertibleHiveDatasetLineageEventTest", String.valueOf(System.currentTimeMillis())))
        .build();
    return jobBroker;
  }
}
| 2,324 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/watermarker/TableLevelWatermarkerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
@Test(groups = { "gobblin.data.management.conversion" })
public class TableLevelWatermarkerTest {
@Test
public void testPreviousState() throws Exception {
  WorkUnitState previousWus = new WorkUnitState();
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_table");
  previousWus.setActualHighWatermark(new LongWatermark(100l));

  // Watermark will be lowest of 100l and 101l
  WorkUnitState previousWus1 = new WorkUnitState();
  previousWus1.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_table");
  previousWus1.setActualHighWatermark(new LongWatermark(101l));

  // FIX: previousWus1 was constructed but never passed to the SourceState, so the
  // "lowest of 100l and 101l" behavior described above was never actually exercised.
  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus, previousWus1));
  TableLevelWatermarker watermarker = new TableLevelWatermarker(state);

  Assert.assertEquals(watermarker.getPreviousHighWatermark(mockTable("test_table")), new LongWatermark(100l));
}
@Test
public void testPreviousStateWithPartitionWatermark() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_table");
previousWus.setActualHighWatermark(new LongWatermark(100l));
// Watermark workunits created by PartitionLevelWatermarker need to be ignored.
WorkUnitState previousWus1 = new WorkUnitState();
previousWus1.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_table");
previousWus1.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus1.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("part1", 200l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
TableLevelWatermarker watermarker = new TableLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousHighWatermark(mockTable("test_table")), new LongWatermark(100l));
}
/**
* Make sure that all partitions get the same previous high watermark (table's watermark)
*/
@Test
public void testPartitionWatermarks() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_table");
previousWus.setActualHighWatermark(new LongWatermark(100l));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
TableLevelWatermarker watermarker = new TableLevelWatermarker(state);
Table mockTable = mockTable("test_table");
Assert.assertEquals(watermarker.getPreviousHighWatermark(mockTable), new LongWatermark(100l));
Assert.assertEquals(watermarker.getPreviousHighWatermark(mockPartition(mockTable, ImmutableList.of("2015"))),
new LongWatermark(100l));
Assert.assertEquals(watermarker.getPreviousHighWatermark(mockPartition(mockTable, ImmutableList.of("2016"))),
new LongWatermark(100l));
}
private static Table mockTable(String name) {
Table table = Mockito.mock(Table.class, Mockito.RETURNS_SMART_NULLS);
Mockito.when(table.getCompleteName()).thenReturn(name);
return table;
}
private static Partition mockPartition(Table table, List<String> values) {
Partition partition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
Mockito.when(partition.getTable()).thenReturn(table);
Mockito.when(partition.getValues()).thenReturn(values);
return partition;
}
}
| 2,325 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/watermarker/PartitionLevelWatermarkerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.watermarker;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import org.apache.gobblin.data.management.conversion.hive.LocalHiveMetastoreTestUtils;
import java.io.File;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.joda.time.DateTime;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.converter.AbstractAvroToOrcConverter;
import org.apache.gobblin.data.management.conversion.hive.source.HiveSource;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
/*
* Tests covered
*
* // Previous state tests
* 1. Test previousWatermarks reading.
* 1.5 Null previous watermarks in a watermark workunit
* 2. Test more than one watermark workunits
 * 3. Get previous high watermark for partition
*
* // Callback tests
* 4. Test calling onPartitionProcessBegin before OnTableProcessBegin
* 5. Test remove dropped partitions
* 6. Test addPartitionWatermarks
*
* // OnGetWorkunitsEnd tests
* 7. No previous state. 5 most recently modified partitions
* 8. Previous state 3. New partitions 3. 2 from new state retained
* 9. Previous state 4. New partitions 5. All 5 new retained
* 10. Previous state 5. New partitions 3; 2 existing and 1 new
* 11. Previous state 3, 2 dropped. New partitions 2
*/
@Test(groups = { "gobblin.data.management.conversion" })
public class PartitionLevelWatermarkerTest {
// No previous state: onGetWorkunitsEnd must emit one watermark workunit per table,
// carrying each partition's expected high watermark; previous watermarks default to 0.
@Test
public void testExpectedHighWatermarkNoPreviousState() throws Exception {
String dbName = "testExpectedHighWatermarkNoPreviousState";
LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);
long now = new DateTime().getMillis();
SourceState state = new SourceState();
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Table table = localTestTable(dbName, "testTable1", true);
Partition part1 = localTestPartition(table, Lists.newArrayList("2015"));
watermarker.onTableProcessBegin(table, 0l);
watermarker.onPartitionProcessBegin(part1, 0l, now + 2015l);
Table table2 = localTestTable(dbName, "testTable2", true);
Partition part2 = localTestPartition(table2, Lists.newArrayList("2016"));
watermarker.onTableProcessBegin(table2, 0l);
watermarker.onPartitionProcessBegin(part2, 0l, now + 16l);
List<WorkUnit> workunits = Lists.newArrayList();
watermarker.onGetWorkunitsEnd(workunits);
// With no previous state, every previous high watermark is 0.
Assert.assertEquals(watermarker.getPreviousHighWatermark(part1).getValue(), 0l);
Assert.assertEquals(watermarker.getPreviousHighWatermark(table).getValue(), 0l);
Assert.assertEquals(watermarker.getPreviousHighWatermark(part2).getValue(), 0l);
Assert.assertEquals(watermarker.getPreviousHighWatermark(table2).getValue(), 0l);
// One watermark workunit per table, each flagged as a watermark-only workunit.
Assert.assertEquals(workunits.size(), 2);
Assert.assertEquals(workunits.get(0).getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY), true);
Assert.assertEquals(workunits.get(1).getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY), true);
// Sort by dataset URN so the assertions below are deterministic regardless of emission order.
Collections.sort(workunits, new Comparator<WorkUnit>() {
@Override
public int compare(WorkUnit o1, WorkUnit o2) {
return o1.getProp(ConfigurationKeys.DATASET_URN_KEY).compareTo(o2.getProp(ConfigurationKeys.DATASET_URN_KEY));
}
});
Assert.assertEquals(workunits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), table.getCompleteName());
Assert.assertEquals(workunits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), table2.getCompleteName());
Assert.assertEquals(workunits.get(0).getExpectedHighWatermark(MultiKeyValueLongWatermark.class).getWatermarks(),
ImmutableMap.of(PartitionLevelWatermarker.partitionKey(part1), now + 2015l));
Assert.assertEquals(workunits.get(1).getExpectedHighWatermark(MultiKeyValueLongWatermark.class).getWatermarks(),
ImmutableMap.of(PartitionLevelWatermarker.partitionKey(part2), now + 16l));
}
// Previous-state test 1: a single watermark workunit's multi-key watermark is read back
// and also seeded into the expected high watermarks.
@Test
public void testReadPreviousWatermarks() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100l, "2016", 101l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 1);
Assert.assertEquals(watermarker.getPreviousWatermarks().get("test_dataset_urn"),
ImmutableMap.of("2015", 100l, "2016", 101l));
// Make sure all the previousWatermarks are added into current expectedHighWatermarks
Assert.assertEquals(watermarker.getPreviousWatermarks(), watermarker.getExpectedHighWatermarks());
}
// Round trip: watermarks written to a workunit by one watermarker instance are readable
// as previous state by a fresh instance (simulates persistence across job runs).
@Test
public void testStateStoreReadWrite() throws Exception {
String dbName = "testStateStoreReadWrite";
LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);
PartitionLevelWatermarker watermarker0 = new PartitionLevelWatermarker(new SourceState());
Table mockTable = localTestTable(dbName, "table1", true);
watermarker0.onTableProcessBegin(mockTable, 0l);
long now = new DateTime().getMillis();
watermarker0.onPartitionProcessBegin(localTestPartition(mockTable, ImmutableList.of("2016")), 0, now);
List<WorkUnit> workunits = Lists.newArrayList();
watermarker0.onGetWorkunitsEnd(workunits);
@SuppressWarnings("deprecation")
WorkUnitState previousWus = new WorkUnitState(workunits.get(0));
watermarker0.setActualHighWatermark(previousWus);
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 1);
Assert.assertEquals(watermarker.getPreviousWatermarks().get(dbName + "@table1"), ImmutableMap.of("2016", now));
}
// Previous-state test 2: watermark workunits for different tables are kept separate.
@Test
public void testReadPreviousWatermarksMultipleTables() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100l, "2016", 101l)));
WorkUnitState previousWus2 = new WorkUnitState();
previousWus2.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn2");
previousWus2.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus2.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("01", 1l, "02", 2l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus, previousWus2));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 2);
Assert.assertEquals(watermarker.getPreviousWatermarks().get("test_dataset_urn"),
ImmutableMap.of("2015", 100l, "2016", 101l));
Assert.assertEquals(watermarker.getPreviousWatermarks().get("test_dataset_urn2"),
ImmutableMap.of("01", 1l, "02", 2l));
// Make sure all the previousWatermarks are added into current expectedHighWatermarks
Assert.assertEquals(watermarker.getPreviousWatermarks(), watermarker.getExpectedHighWatermarks());
}
// Two watermark workunits for the SAME dataset URN are invalid previous state.
@Test(expectedExceptions = IllegalStateException.class)
public void testMoreThanOneWatermarkWorkunits() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100l)));
WorkUnitState previousWus2 = new WorkUnitState();
previousWus2.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus2.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus2.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2016", 101l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus, previousWus2));
// Expecting IllegalStateException
new PartitionLevelWatermarker(state);
}
// A watermark workunit with no actual high watermark set must be tolerated (yields no entry).
@Test
public void testReadPreviousNullWatermarks() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 0);
}
// Only workunits flagged IS_WATERMARK_WORKUNIT_KEY contribute previous watermarks;
// ordinary workunits (previousWus2 below) are ignored.
@Test
public void testNoPreviousWatermarkWorkunits() throws Exception {
// Create one previous workunit with IS_WATERMARK_WORKUNIT_KEY set to true
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100l)));
// Create one previous workunit with IS_WATERMARK_WORKUNIT_KEY not set (false)
WorkUnitState previousWus2 = new WorkUnitState();
previousWus2.setProp(ConfigurationKeys.DATASET_URN_KEY, "test_dataset_urn2");
previousWus2.setActualHighWatermark(new LongWatermark(101l));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus, previousWus2));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 1);
Assert.assertEquals(watermarker.getPreviousWatermarks().get("test_dataset_urn"), ImmutableMap.of("2015", 100l));
}
// Each partition resolves to its own previous high watermark keyed by partition value.
@Test
public void testGetPreviousHighWatermarkForPartition() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100l, "2016", 101l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Table table = mockTable("test_dataset_urn");
Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));
Partition partition2016 = mockPartition(table, ImmutableList.of("2016"));
Assert.assertEquals(watermarker.getPreviousHighWatermark(partition2015), new LongWatermark(100l));
Assert.assertEquals(watermarker.getPreviousHighWatermark(partition2016), new LongWatermark(101l));
}
// Calling onPartitionProcessBegin without a preceding onTableProcessBegin is illegal.
// NOTE(review): method name has a typo ("Begore" -> "Before"); left unchanged here since
// renaming would alter the class's code rather than its documentation.
@Test(expectedExceptions = IllegalStateException.class)
public void testPartitionBeginBegoreTableBegin() throws Exception {
SourceState state = new SourceState();
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Table table = mockTable("test_dataset_urn");
Partition partition = mockPartition(table, ImmutableList.of(""));
watermarker.onPartitionProcessBegin(partition, 0l, 0l);
}
// A partition that replaced (dropped) older partitions removes their watermarks:
// processing "2015" with REPLACED_PARTITIONS metadata drops "2015-01" and "2015-02".
@Test
public void testDroppedPartitions() throws Exception {
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus
.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100l, "2015-02", 101l)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
Table table = mockTable("test_dataset_urn");
Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));
Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));
// partition 2015 replaces 2015-01 and 2015-02
Mockito.when(partition2015.getParameters()).thenReturn(
ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
watermarker.onPartitionProcessBegin(partition2015, 0l, 0l);
Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("db@test_dataset_urn"), ImmutableMap.of("2015", 0l));
}
// No previous state. 5 new modified partitions. Only 3 most recently modified retained in getExpectedHighWatermark
@Test
public void testRecentlyModifiedPartitionWatermarks() throws Exception {
String dbName = "testRecentlyModifiedPartitionWatermarks";
LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);
SourceState state = new SourceState();
state.setProp(HiveSource.HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY, 3);
long time5DaysAgo = new DateTime().minusDays(5).getMillis();
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
// Partitions modified before this cutoff are not persisted in state.
watermarker.setLeastWatermarkToPersistInState(time5DaysAgo);
Table table = localTestTable(dbName, "testTable2", true);
Partition part2010 = localTestPartition(table, ImmutableList.of("2010"));
Partition part2011 = localTestPartition(table, ImmutableList.of("2011"));
Partition part2012 = localTestPartition(table, ImmutableList.of("2012"));
Partition part2013 = localTestPartition(table, ImmutableList.of("2013"));
Partition part2014 = localTestPartition(table, ImmutableList.of("2014"));
watermarker.onTableProcessBegin(table, 0l);
// 2010/2011 are older than the cutoff (dropped); 2012-2014 are newer (retained).
watermarker.onPartitionProcessBegin(part2010, 0l, time5DaysAgo - 100l);
watermarker.onPartitionProcessBegin(part2011, 0l, time5DaysAgo - 101l);
watermarker.onPartitionProcessBegin(part2012, 0l, time5DaysAgo + 102l);
watermarker.onPartitionProcessBegin(part2013, 0l, time5DaysAgo + 103l);
watermarker.onPartitionProcessBegin(part2014, 0l, time5DaysAgo + 104l);
List<WorkUnit> workunits = Lists.newArrayList();
watermarker.onGetWorkunitsEnd(workunits);
Assert.assertEquals(workunits.size(), 1);
WorkUnit watermarkWu = workunits.get(0);
Map<String, Long> workunitWatermarks =
watermarkWu.getExpectedHighWatermark(MultiKeyValueLongWatermark.class).getWatermarks();
Assert.assertEquals(workunitWatermarks.size(), 3, "expectedHighWatermarks size");
ImmutableMap<String, Long> expectedWatermarks =
ImmutableMap.of("2014", time5DaysAgo + 104l, "2013", time5DaysAgo + 103l, "2012", time5DaysAgo + 102l);
Assert.assertEquals(workunitWatermarks, expectedWatermarks);
}
//Previous state 3. New partitions 3. 2 from new state retained
@Test
public void testRecentlyModifiedPartitionWatermarksWithPreviousState() throws Exception {
String dbName = "testRecentlyModifiedPartitionWatermarksWithPreviousState";
LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);
long time5DaysAgo = new DateTime().minusDays(5).getMillis();
WorkUnitState previousWus = new WorkUnitState();
previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, dbName + "@testTable2");
previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2010", time5DaysAgo - 100l, // Do not retain
"2011", time5DaysAgo - 101l, // Do not retain
"2012", time5DaysAgo + 102l // Do retain
)));
SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
state.setProp(HiveSource.HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY, 3);
PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
watermarker.setLeastWatermarkToPersistInState(time5DaysAgo);
Table table = localTestTable(dbName, "testTable2", true);
// Watermark not retained
Partition part2009 = localTestPartition(table, ImmutableList.of("2009"));
// Watermark retained
Partition part2013 = localTestPartition(table, ImmutableList.of("2013"));
Partition part2014 = localTestPartition(table, ImmutableList.of("2014"));
watermarker.onTableProcessBegin(table, 0l);
// Watermark not retained
watermarker.onPartitionProcessBegin(part2009, 0l, time5DaysAgo - 99l);
// Watermark retained
watermarker.onPartitionProcessBegin(part2013, 0l, time5DaysAgo + 103l);
watermarker.onPartitionProcessBegin(part2014, 0l, time5DaysAgo + 104l);
List<WorkUnit> workunits = Lists.newArrayList();
watermarker.onGetWorkunitsEnd(workunits);
Assert.assertEquals(workunits.size(), 1);
WorkUnit watermarkWu = workunits.get(0);
Map<String, Long> workunitWatermarks =
watermarkWu.getExpectedHighWatermark(MultiKeyValueLongWatermark.class).getWatermarks();
Assert.assertEquals(workunitWatermarks.size(), 3, "expectedHighWatermarks size");
// 2012 survives from previous state; 2013/2014 are newly observed.
ImmutableMap<String, Long> expectedWatermarks =
ImmutableMap.of("2014", time5DaysAgo + 104l, "2013", time5DaysAgo + 103l, "2012", time5DaysAgo + 102l);
Assert.assertEquals(workunitWatermarks, expectedWatermarks);
}
// Creates a mock Table whose complete name is "db@" + name.
private static Table mockTable(String name) {
Table table = Mockito.mock(Table.class, Mockito.RETURNS_SMART_NULLS);
Mockito.when(table.getCompleteName()).thenReturn("db@" + name);
return table;
}
// Creates a mock Partition of the given table with the given partition values.
private static Partition mockPartition(Table table, List<String> values) {
Partition partition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
Mockito.when(partition.getTable()).thenReturn(table);
Mockito.when(partition.getValues()).thenReturn(values);
return partition;
}
// Creates a real Avro table in the local test metastore, backed by a temp directory.
private static Table localTestTable(String dbName, String name, boolean partitioned) throws Exception {
File tableSdFile = Files.createTempDir();
tableSdFile.deleteOnExit();
return new Table(LocalHiveMetastoreTestUtils.getInstance()
.createTestAvroTable(dbName, name, tableSdFile.getAbsolutePath(),
partitioned ? Optional.of("part") : Optional.<String>absent()));
}
// Registers a real partition with the given values in the local test metastore.
private static Partition localTestPartition(Table table, List<String> values) throws Exception {
return new Partition(table,
LocalHiveMetastoreTestUtils.getInstance().addTestPartition(table.getTTable(), values, 0));
}
}
| 2,326 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/provider/DatePatternUpdateProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.provider;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
public class DatePatternUpdateProviderTest {
private static final long EPOCH_2016_02_02 = 1454400000000l;
private static final long EPOCH_2016_02_02_10 = 1454436000000l;
@Test
public void testDaily() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/daily/2016/02/02");
Assert.assertEquals(updateProvider.getUpdateTime(mockPartition), EPOCH_2016_02_02);
}
@Test
public void testDailyLate() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/daily_late/2016/02/02");
Assert.assertEquals(updateProvider.getUpdateTime(mockPartition), EPOCH_2016_02_02);
}
@Test
public void testHourly() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/hourly/2016/02/02/10");
Assert.assertEquals(updateProvider.getUpdateTime(mockPartition), EPOCH_2016_02_02_10);
}
@Test
public void testHourlyLate() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/hourly_late/2016/02/02/10");
Assert.assertEquals(updateProvider.getUpdateTime(mockPartition), EPOCH_2016_02_02_10);
}
@Test
public void testHourlyDeduped() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/hourly_deduped/2016/02/02/10");
Assert.assertEquals(updateProvider.getUpdateTime(mockPartition), EPOCH_2016_02_02_10);
}
@Test(expectedExceptions = UpdateNotFoundException.class)
public void testHourlyInvalid() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/hourly/2016/02/abc/10");
updateProvider.getUpdateTime(mockPartition);
}
@Test(expectedExceptions = UpdateNotFoundException.class)
public void testNoMatchingPattern() throws Exception {
HiveUnitUpdateProvider updateProvider = new DatePatternUpdateProvider();
Partition mockPartition = createMockPartitionWithLocation("/data/TestEvent/2016/02/02/10");
updateProvider.getUpdateTime(mockPartition);
}
public static Partition createMockPartitionWithLocation(String location) {
Partition mockPartition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
org.apache.hadoop.hive.metastore.api.Partition mockTPartition =
Mockito.mock(org.apache.hadoop.hive.metastore.api.Partition.class, Mockito.RETURNS_SMART_NULLS);
StorageDescriptor mockSd = Mockito.mock(StorageDescriptor.class, Mockito.RETURNS_SMART_NULLS);
Mockito.when(mockSd.getLocation()).thenReturn(location);
Mockito.when(mockTPartition.getSd()).thenReturn(mockSd);
Mockito.when(mockPartition.getTPartition()).thenReturn(mockTPartition);
return mockPartition;
}
}
| 2,327 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.materializer;
import java.io.File;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.conversion.hive.LocalHiveMetastoreTestUtils;
import org.apache.gobblin.data.management.conversion.hive.entities.TableLikeStageableTableMetadata;
import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils;
import org.apache.gobblin.data.management.conversion.hive.task.HiveTask;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.HiveJdbcConnector;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
@Test (groups = {"disabledOnCI"})
public class HiveMaterializerTest {
private final LocalHiveMetastoreTestUtils localHiveMetastore = LocalHiveMetastoreTestUtils.getInstance();
private final String dbName = HiveMaterializerTest.class.getSimpleName();
private final String sourceTableName = "source";
private final String partitionColumn = "part";
private File dataFile;
private HiveJdbcConnector jdbcConnector;
private HiveDataset dataset;
private HiveMetastoreClientPool pool;
// One-time fixture: creates an embedded-Hive external text table with two partitions
// ("part1"/"part2") backed by checked-in resource files, sanity-checks the row counts
// (8 total, 4 in part1), and builds the HiveDataset used by the materialization tests.
@BeforeClass
public void setup() throws Exception {
this.jdbcConnector = HiveJdbcConnector.newEmbeddedConnector(2);
this.dataFile = new File(getClass().getClassLoader().getResource("hiveMaterializerTest/source/").toURI());
this.localHiveMetastore.dropDatabaseIfExists(this.dbName);
this.localHiveMetastore.createTestDb(this.dbName);
// DDL must run in order: table first, then the two partition registrations.
this.jdbcConnector.executeStatements(
String.format("CREATE EXTERNAL TABLE %s.%s (id STRING, name String) PARTITIONED BY (%s String) "
+ "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE",
this.dbName, this.sourceTableName, this.partitionColumn),
String.format("ALTER TABLE %s.%s ADD PARTITION (part = 'part1') LOCATION '%s'",
this.dbName, this.sourceTableName, this.dataFile.getAbsolutePath() + "/part1"),
String.format("ALTER TABLE %s.%s ADD PARTITION (part = 'part2') LOCATION '%s'",
this.dbName, this.sourceTableName, this.dataFile.getAbsolutePath() + "/part2"));
// Sanity checks: the fixture data loaded as expected before any test runs.
List<List<String>> allTable = executeStatementAndGetResults(this.jdbcConnector,
String.format("SELECT * FROM %s.%s", this.dbName, this.sourceTableName), 3);
Assert.assertEquals(allTable.size(), 8);
List<List<String>> part1 = executeStatementAndGetResults(this.jdbcConnector,
String.format("SELECT * FROM %s.%s WHERE %s='part1'", this.dbName, this.sourceTableName, this.partitionColumn), 3);
Assert.assertEquals(part1.size(), 4);
this.pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
Table table;
// AutoReturnableObject returns the metastore client to the pool on close.
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
table = new Table(client.get().getTable(this.dbName, this.sourceTableName));
}
this.dataset = new HiveDataset(FileSystem.getLocal(new Configuration()), pool, table, new Properties());
}
/** Releases the embedded Hive JDBC connector opened in {@link #setup()}, if any. */
@AfterClass
public void teardown() throws Exception {
  // Guard clause: setup may have failed before the connector was created.
  if (this.jdbcConnector == null) {
    return;
  }
  this.jdbcConnector.close();
}
/**
 * Materializes a single partition ({@code part=part1}) of the source table into a new
 * table "copyTable" via {@link HiveMaterializer#tableCopyWorkUnit}, then verifies the
 * run and commit both succeed and that exactly the four part1 rows were copied.
 */
@Test
public void testCopyTable() throws Exception {
  String destTable = "copyTable";
  File stagingDir = Files.createTempDir();
  stagingDir.deleteOnExit();

  TableLikeStageableTableMetadata stageMetadata = new TableLikeStageableTableMetadata(
      this.dataset.getTable(), this.dbName, destTable, stagingDir.getAbsolutePath());
  String partitionFilter = String.format("%s=part1", this.partitionColumn);
  WorkUnit wu = HiveMaterializer.tableCopyWorkUnit(this.dataset, stageMetadata, partitionFilter);

  HiveMaterializer materializer = new HiveMaterializer(getTaskContextForRun(wu));
  materializer.run();
  Assert.assertEquals(materializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  materializer.commit();
  Assert.assertEquals(materializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);

  // Only part1's four rows (ids 101-104) should land in the destination table.
  List<List<String>> rows = executeStatementAndGetResults(this.jdbcConnector,
      String.format("SELECT * FROM %s.%s", this.dbName, destTable), 3);
  Assert.assertEquals(rows.size(), 4);
  List<String> ids = rows.stream().map(r -> r.get(0)).collect(Collectors.toList());
  Assert.assertEquals(ids, Lists.newArrayList("101", "102", "103", "104"));
}
/**
 * Materializes the whole source table (no partition filter) into a new Avro-backed
 * table and verifies that all 8 rows from both partitions are present.
 */
@Test
public void testMaterializeTable() throws Exception {
  String destinationTable = "materializeTable";
  File tmpDir = Files.createTempDir();
  tmpDir.deleteOnExit();
  // null partition spec: the entire table is materialized.
  WorkUnit workUnit = HiveMaterializer.viewMaterializationWorkUnit(this.dataset, HiveConverterUtils.StorageFormat.AVRO,
      new TableLikeStageableTableMetadata(this.dataset.getTable(), this.dbName, destinationTable, tmpDir.getAbsolutePath()), null);
  HiveMaterializer hiveMaterializer = new HiveMaterializer(getTaskContextForRun(workUnit));
  hiveMaterializer.run();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  hiveMaterializer.commit();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  List<List<String>> allTable = executeStatementAndGetResults(this.jdbcConnector,
      String.format("SELECT * FROM %s.%s", this.dbName, destinationTable), 3);
  Assert.assertEquals(allTable.size(), 8);
  Assert.assertEquals(allTable.stream().map(l -> l.get(0)).collect(Collectors.toList()),
      Lists.newArrayList("101", "102", "103", "104", "201", "202", "203", "204"));
}
/**
 * Materializes only the {@code part1} partition into a new Avro-backed table and
 * verifies that exactly the 4 rows of that partition are present.
 */
@Test
public void testMaterializeTablePartition() throws Exception {
  String destinationTable = "materializeTablePartition";
  File tmpDir = Files.createTempDir();
  tmpDir.deleteOnExit();
  // Partition spec restricts materialization to part1.
  WorkUnit workUnit = HiveMaterializer.viewMaterializationWorkUnit(this.dataset, HiveConverterUtils.StorageFormat.AVRO,
      new TableLikeStageableTableMetadata(this.dataset.getTable(), this.dbName, destinationTable, tmpDir.getAbsolutePath()),
      String.format("%s=part1", this.partitionColumn));
  HiveMaterializer hiveMaterializer = new HiveMaterializer(getTaskContextForRun(workUnit));
  hiveMaterializer.run();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  hiveMaterializer.commit();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  List<List<String>> allTable = executeStatementAndGetResults(this.jdbcConnector,
      String.format("SELECT * FROM %s.%s", this.dbName, destinationTable), 3);
  Assert.assertEquals(allTable.size(), 4);
  Assert.assertEquals(allTable.stream().map(l -> l.get(0)).collect(Collectors.toList()),
      Lists.newArrayList("101", "102", "103", "104"));
}
/**
 * Creates a Hive view filtering the source table on {@code name = 'foo'} and
 * materializes it into an Avro-backed table; expects the 4 matching rows drawn
 * from both partitions.
 */
@Test
public void testMaterializeView() throws Exception {
  String destinationTable = "materializeView";
  File tmpDir = Files.createTempDir();
  tmpDir.deleteOnExit();
  String viewName = "myView";
  this.jdbcConnector.executeStatements(String.format("CREATE VIEW %s.%s AS SELECT * FROM %s.%s WHERE name = 'foo'",
      this.dbName, viewName, this.dbName, this.sourceTableName));
  // Look up the view's metadata so it can be wrapped in a HiveDataset.
  Table view;
  try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
    view = new Table(client.get().getTable(this.dbName, viewName));
  }
  HiveDataset viewDataset = new HiveDataset(FileSystem.getLocal(new Configuration()), pool, view, new Properties());
  WorkUnit workUnit = HiveMaterializer.viewMaterializationWorkUnit(viewDataset, HiveConverterUtils.StorageFormat.AVRO,
      new TableLikeStageableTableMetadata(viewDataset.getTable(), this.dbName, destinationTable, tmpDir.getAbsolutePath()),
      null);
  HiveMaterializer hiveMaterializer = new HiveMaterializer(getTaskContextForRun(workUnit));
  hiveMaterializer.run();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  hiveMaterializer.commit();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  List<List<String>> allTable = executeStatementAndGetResults(this.jdbcConnector,
      String.format("SELECT * FROM %s.%s", this.dbName, destinationTable), 3);
  Assert.assertEquals(allTable.size(), 4);
  Assert.assertEquals(allTable.stream().map(l -> l.get(0)).collect(Collectors.toList()),
      Lists.newArrayList("101", "103", "201", "203"));
}
/**
 * Materializes the result of an ad-hoc SELECT (rows where {@code name = 'foo'})
 * into an Avro-backed table and verifies the 4 matching rows.
 */
@Test
public void testMaterializeQuery() throws Exception {
  String destinationTable = "materializeQuery";
  File tmpDir = Files.createTempDir();
  tmpDir.deleteOnExit();
  WorkUnit workUnit = HiveMaterializer.queryResultMaterializationWorkUnit(
      String.format("SELECT * FROM %s.%s WHERE name = 'foo'", this.dbName, this.sourceTableName),
      HiveConverterUtils.StorageFormat.AVRO,
      new TableLikeStageableTableMetadata(this.dataset.getTable(), this.dbName, destinationTable, tmpDir.getAbsolutePath()));
  HiveMaterializer hiveMaterializer = new HiveMaterializer(getTaskContextForRun(workUnit));
  hiveMaterializer.run();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  hiveMaterializer.commit();
  Assert.assertEquals(hiveMaterializer.getWorkingState(), WorkUnitState.WorkingState.SUCCESSFUL);
  List<List<String>> allTable = executeStatementAndGetResults(this.jdbcConnector,
      String.format("SELECT * FROM %s.%s", this.dbName, destinationTable), 3);
  Assert.assertEquals(allTable.size(), 4);
  Assert.assertEquals(allTable.stream().map(l -> l.get(0)).collect(Collectors.toList()),
      Lists.newArrayList("101", "103", "201", "203"));
}
/**
 * Wraps the given {@link WorkUnit} in a {@link TaskContext} suitable for running a
 * {@link HiveMaterializer} directly: assigns fixed job/task ids, skips setting the
 * group on the destination, and disables the Hive watermarker.
 */
private TaskContext getTaskContextForRun(WorkUnit workUnit) {
  workUnit.setProp(ConfigurationKeys.JOB_ID_KEY, "job123");
  workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, "task123");
  workUnit.setProp(HiveConverterUtils.HIVE_DATASET_DESTINATION_SKIP_SETGROUP, Boolean.toString(true));
  HiveTask.disableHiveWatermarker(workUnit);
  WorkUnitState workUnitState = new WorkUnitState(workUnit, new JobState("job", "job123"));
  return new TaskContext(workUnitState);
}
/**
 * Executes {@code query} over the given connector and returns every result row as a
 * list of its first {@code columns} column values, rendered as strings.
 *
 * @param connector connector providing the JDBC {@link Connection}
 * @param query SQL statement to execute
 * @param columns number of leading columns to extract from each row
 * @return one inner list per result row, each holding {@code columns} string values
 * @throws SQLException if statement execution or result retrieval fails
 */
private List<List<String>> executeStatementAndGetResults(HiveJdbcConnector connector, String query, int columns) throws SQLException {
  List<List<String>> result = new ArrayList<>();
  // Close both the Statement and its ResultSet deterministically via try-with-resources
  // (the original only closed the Statement, leaving the ResultSet to implicit cleanup).
  try (Statement stmt = connector.getConnection().createStatement()) {
    stmt.execute(query);
    try (ResultSet rs = stmt.getResultSet()) {
      while (rs.next()) {
        List<String> row = new ArrayList<>(columns);
        // JDBC column indices are 1-based.
        for (int i = 1; i <= columns; i++) {
          row.add(rs.getString(i));
        }
        result.add(row);
      }
    }
  }
  return result;
}
}
| 2,328 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/avro/AvroSchemaManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.avro;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AvroUtils;
public class AvroSchemaManagerTest {

  /**
   * Asks {@link AvroSchemaManager} for the schema URL of a partition whose columns come
   * from Hive metadata, then checks that the Avro schema written at that URL matches the
   * expected resource file byte-for-byte (as strings).
   */
  @Test
  public void testGetSchemaFromUrlUsingHiveSchema() throws IOException, HiveException {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    State jobState = new State();
    jobState.setProp(ConfigurationKeys.JOB_ID_KEY, "123");
    AvroSchemaManager schemaManager = new AvroSchemaManager(localFs, jobState);
    Partition partition = getTestPartition(new Table("testDb", "testTable"));
    Path schemaUrl = schemaManager.getSchemaUrl(partition);
    Schema writtenSchema = AvroUtils.parseSchemaFromFile(schemaUrl, localFs);
    byte[] expectedBytes = Files.readAllBytes(
        Paths.get(getClass().getClassLoader().getResource("avroSchemaManagerTest/expectedSchema.avsc").getFile()));
    Assert.assertEquals(writtenSchema.toString(), new String(expectedBytes));
  }

  /** Builds a one-partition fixture backed by an Avro SerDe with a single int column. */
  private Partition getTestPartition(Table table) throws HiveException {
    Partition partition = new Partition(table, ImmutableMap.of("partition_key", "1"), null);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo("avro", AvroSerDe.class.getName(), null));
    sd.setCols(Lists.newArrayList(new FieldSchema("foo", "int", null)));
    partition.getTPartition().setSd(sd);
    return partition;
  }
}
| 2,329 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.conversion.hive.task;
import java.util.Map;
import org.junit.Test;
import org.testng.Assert;
import org.testng.collections.Maps;
import com.google.common.base.Optional;
// NOTE(review): this class annotates with org.junit.Test but asserts with
// org.testng.Assert — consider unifying on TestNG like the rest of the module.
public class HiveConverterUtilsTest {
  private final String inputDbName = "testdb";
  private final String inputTableName = "testtable";
  private final String outputDatabaseName = "testdb2";
  private final String outputTableName = "testtable2";

  /**
   * Verifies that {@link HiveConverterUtils#generateTableCopy} renders the expected
   * {@code INSERT OVERWRITE ... PARTITION} query for a single partition.
   */
  @Test
  public void copyTableQueryTest() throws Exception {
    Map<String, String> partitionsDMLInfo = Maps.newHashMap();
    String partitionName = "datepartition";
    String partitionValue = "2017-07-15-08";
    partitionsDMLInfo.put(partitionName, partitionValue);
    String expectedQuery = "INSERT OVERWRITE TABLE `" + outputDatabaseName + "`.`" + outputTableName + "` \n"
        + "PARTITION (`" + partitionName + "`) \n" + "SELECT * FROM `" + inputDbName + "`.`" + inputTableName + "` WHERE "
        + "`" + partitionName + "`='" + partitionsDMLInfo.get(partitionName) + "'";
    String actualQuery = HiveConverterUtils.generateTableCopy(inputTableName,
        outputTableName, inputDbName, outputDatabaseName, Optional.of(partitionsDMLInfo));
    // TestNG's Assert.assertEquals takes (actual, expected); the original passed them
    // swapped, which produces misleading failure messages.
    Assert.assertEquals(actualQuery, expectedQuery);
  }
}
| 2,330 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TestCopyableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import com.google.common.collect.Lists;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
 * Test implementation of {@link CopyableDataset} that emits a fixed number of synthetic
 * {@link CopyableFile}s rooted at a configurable dataset root.
 */
public class TestCopyableDataset implements CopyableDataset, FileSystemDataset {

  public static final int FILE_COUNT = 10;
  public static final int FILE_LENGTH = 15;
  public static final String ORIGIN_PREFIX = "/test";
  public static final String DESTINATION_PREFIX = "/destination";
  public static final String RELATIVE_PREFIX = "/relative";
  public static final OwnerAndPermission OWNER_AND_PERMISSION = new OwnerAndPermission("owner", "group",
      FsPermission.getDefault());

  private final Path datasetRoot;

  public TestCopyableDataset(Path datasetRoot) {
    this.datasetRoot = datasetRoot;
  }

  public TestCopyableDataset() {
    // Delegate to the main constructor with the default root.
    this(new Path(ORIGIN_PREFIX));
  }

  /** Builds {@value #FILE_COUNT} synthetic copyable files named 0..{@value #FILE_COUNT}-1. */
  @Override
  public Collection<? extends CopyEntity> getCopyableFiles(FileSystem targetFs, CopyConfiguration configuration)
      throws IOException {
    List<CopyEntity> copyEntities = Lists.newArrayList();
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      FileStatus origin =
          new FileStatus(FILE_LENGTH, false, 0, 0, 0, new Path(this.datasetRoot, Integer.toString(fileIdx)));
      CopyableFile.Builder builder = CopyableFile
          .builder(FileSystem.getLocal(new Configuration()), origin, datasetRoot(), configuration)
          .destinationOwnerAndPermission(OWNER_AND_PERMISSION)
          .ancestorsOwnerAndPermission(Lists.newArrayList(OWNER_AND_PERMISSION))
          .checksum("checksum".getBytes());
      // Hook for subclasses to tweak each file before it is built.
      modifyCopyableFile(builder, origin);
      copyEntities.add(builder.build());
    }
    return copyEntities;
  }

  @Override
  public Path datasetRoot() {
    return this.datasetRoot;
  }

  /** Subclass hook; the base implementation leaves the builder untouched. */
  protected void modifyCopyableFile(CopyableFile.Builder builder, FileStatus origin) {
  }

  @Override
  public String datasetURN() {
    return datasetRoot().toString();
  }
}
| 2,331 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TestCopyablePartitionableDatasedFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;
import org.apache.gobblin.dataset.DatasetsFinder;
/** Test finder that always yields a single {@link TestCopyablePartitionableDataset}. */
public class TestCopyablePartitionableDatasedFinder implements DatasetsFinder<CopyableDataset> {

  public TestCopyablePartitionableDatasedFinder(FileSystem fs, Properties props) {
    // Stateless: this finder needs neither the filesystem nor the properties.
  }

  @Override
  public List<CopyableDataset> findDatasets() throws IOException {
    List<CopyableDataset> datasets = Lists.newArrayList();
    datasets.add(new TestCopyablePartitionableDataset());
    return datasets;
  }

  @Override
  public Path commonDatasetRoot() {
    return new Path("/test");
  }
}
| 2,332 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TestCopyableDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Lists;
import org.apache.gobblin.dataset.DatasetsFinder;
/** Test finder that always yields a single {@link TestCopyableDataset}. */
public class TestCopyableDatasetFinder implements DatasetsFinder<CopyableDataset> {

  public TestCopyableDatasetFinder(FileSystem fs, Properties pros) throws IOException {
    // Stateless: this finder needs neither the filesystem nor the properties.
  }

  @Override
  public List<CopyableDataset> findDatasets() throws IOException {
    List<CopyableDataset> datasets = Lists.newArrayList();
    datasets.add(new TestCopyableDataset());
    return datasets;
  }

  @Override
  public Path commonDatasetRoot() {
    return new Path("/test");
  }
}
| 2,333 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TimeAwareRecursiveCopyableDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDateTime;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
@Slf4j
public class TimeAwareRecursiveCopyableDatasetTest {
// Local filesystem used to build the synthetic dataset directory trees.
private FileSystem fs;
// Base dirs created in setUp(): ds1/hourly, ds1/daily, ds2/daily, ds3/daily.
private Path baseDir1;
private Path baseDir2;
private Path baseDir3;
private Path baseDir4;
// Lookback windows: the *_STR forms are the property values handed to the dataset,
// the numeric forms are used to compute expected match counts.
private static final String NUM_LOOKBACK_DAYS_STR = "2d";
private static final Integer NUM_LOOKBACK_DAYS = 2;
private static final String NUM_LOOKBACK_HOURS_STR = "4h";
private static final Integer NUM_LOOKBACK_HOURS = 4;
// Number of daily/hourly directories created per test run (must exceed the lookbacks;
// checked in setUp()).
private static final Integer MAX_NUM_DAILY_DIRS = 4;
private static final Integer MAX_NUM_HOURLY_DIRS = 48;
private static final String NUM_LOOKBACK_DAYS_HOURS_STR = "1d1h";
// A "1d1h" lookback spans 25 hourly directories (24 + 1).
private static final Integer NUM_DAYS_HOURS_DIRS = 25;
private static final String NUM_LOOKBACK_HOURS_MINS_STR = "1h1m";
/**
 * Validates the test constants and (re)creates the four empty base directories used
 * by the test cases on the local filesystem.
 *
 * <p>The original version also built a {@link PeriodFormatter} and parsed a
 * {@link Period} whose results were never used; that dead code has been removed.
 */
@BeforeClass
public void setUp() throws IOException {
  // Lookbacks must fit inside the directory ranges created by the tests.
  Assert.assertTrue(NUM_LOOKBACK_DAYS < MAX_NUM_DAILY_DIRS);
  Assert.assertTrue(NUM_LOOKBACK_HOURS < MAX_NUM_HOURLY_DIRS);
  this.fs = FileSystem.getLocal(new Configuration());
  baseDir1 = recreateDir(new Path("/tmp/src/ds1/hourly"));
  baseDir2 = recreateDir(new Path("/tmp/src/ds1/daily"));
  baseDir3 = recreateDir(new Path("/tmp/src/ds2/daily"));
  baseDir4 = recreateDir(new Path("/tmp/src/ds3/daily"));
}

/** Deletes {@code dir} recursively if it exists, then recreates it empty. */
private Path recreateDir(Path dir) throws IOException {
  if (this.fs.exists(dir)) {
    this.fs.delete(dir, true);
  }
  this.fs.mkdirs(dir);
  return dir;
}
/**
 * End-to-end check of {@code getFilesAtPath} over several date patterns and lookback
 * windows: hourly directories with "4h" and "1d1h" lookbacks, daily directories with
 * "2d", and minute/second-granularity directory names with "2d1h". For each case the
 * files expected to fall inside the window are gathered into {@code candidateFiles},
 * then the finder's result is asserted to match in both size and membership.
 */
@Test
public void testGetFilesAtPath() throws IOException {
  String datePattern = "yyyy/MM/dd/HH";
  DateTimeFormatter formatter = DateTimeFormat.forPattern(datePattern);
  LocalDateTime endDate = LocalDateTime.now(DateTimeZone.forID(TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
  Set<String> candidateFiles = new HashSet<>();
  // One hourly directory (with a single .avro file) for each of the last 48 hours;
  // the most recent NUM_LOOKBACK_HOURS + 1 fall inside the "4h" window.
  for (int i = 0; i < MAX_NUM_HOURLY_DIRS; i++) {
    String startDate = endDate.minusHours(i).toString(formatter);
    Path subDirPath = new Path(baseDir1, new Path(startDate));
    fs.mkdirs(subDirPath);
    Path filePath = new Path(subDirPath, i + ".avro");
    fs.create(filePath);
    if (i < (NUM_LOOKBACK_HOURS + 1)) {
      candidateFiles.add(filePath.toString());
    }
  }
  //Lookback time = "4h"
  Properties properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_HOURS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd/HH");
  PathFilter pathFilter = new HiddenFilter();
  TimeAwareRecursiveCopyableDataset dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir1, properties,
      new Path("/tmp/src/*/hourly"));
  List<FileStatus> fileStatusList = dataset.getFilesAtPath(fs, baseDir1, pathFilter);
  Assert.assertEquals(fileStatusList.size(), NUM_LOOKBACK_HOURS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
  //Lookback time = "1d1h"
  // Reuse the hourly directories created above; the window now covers 25 of them.
  properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_DAYS_HOURS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd/HH");
  dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir1, properties,
      new Path("/tmp/src/*/hourly"));
  fileStatusList = dataset.getFilesAtPath(fs, baseDir1, pathFilter);
  candidateFiles = new HashSet<>();
  datePattern = "yyyy/MM/dd/HH";
  formatter = DateTimeFormat.forPattern(datePattern);
  for (int i = 0; i < MAX_NUM_HOURLY_DIRS; i++) {
    String startDate = endDate.minusHours(i).toString(formatter);
    Path subDirPath = new Path(baseDir1, new Path(startDate));
    Path filePath = new Path(subDirPath, i + ".avro");
    if (i < NUM_DAYS_HOURS_DIRS + 1) {
      candidateFiles.add(filePath.toString());
    }
  }
  Assert.assertEquals(fileStatusList.size(), NUM_DAYS_HOURS_DIRS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
  //Lookback time = "2d"
  // Daily directories under baseDir2; the window covers the most recent 3 days.
  datePattern = "yyyy/MM/dd";
  formatter = DateTimeFormat.forPattern(datePattern);
  endDate = LocalDateTime.now(DateTimeZone.forID(TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
  candidateFiles = new HashSet<>();
  for (int i = 0; i < MAX_NUM_DAILY_DIRS; i++) {
    String startDate = endDate.minusDays(i).toString(formatter);
    Path subDirPath = new Path(baseDir2, new Path(startDate));
    fs.mkdirs(subDirPath);
    Path filePath = new Path(subDirPath, i + ".avro");
    fs.create(filePath);
    if (i < (NUM_LOOKBACK_DAYS + 1)) {
      candidateFiles.add(filePath.toString());
    }
  }
  // Edge case: test that files that do not match dateformat but within the folders searched by the timeaware finder is ignored
  File f = new File(baseDir2.toString() + "/metadata.test");
  f.createNewFile();
  properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_DAYS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd");
  dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir2, properties,
      new Path("/tmp/src/*/daily"));
  fileStatusList = dataset.getFilesAtPath(fs, baseDir2, pathFilter);
  Assert.assertEquals(fileStatusList.size(), NUM_LOOKBACK_DAYS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
  // test ds of daily/yyyy-MM-dd-HH-mm
  // Directory names carry a random minute component; i == 0 is pinned to minute 0 so
  // it can never land in the future relative to "now".
  datePattern = "yyyy-MM-dd-HH-mm";
  formatter = DateTimeFormat.forPattern(datePattern);
  endDate = LocalDateTime.now(DateTimeZone.forID(TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
  Random random = new Random();
  candidateFiles = new HashSet<>();
  for (int i = 0; i < MAX_NUM_DAILY_DIRS; i++) {
    String startDate = endDate.minusDays(i).withMinuteOfHour(random.nextInt(60)).toString(formatter);
    if (i == 0) {
      // avoid future dates on minutes, so have consistency test result
      startDate = endDate.minusHours(i).withMinuteOfHour(0).toString(formatter);
    }
    Path subDirPath = new Path(baseDir3, new Path(startDate));
    fs.mkdirs(subDirPath);
    Path filePath = new Path(subDirPath, i + ".avro");
    fs.create(filePath);
    if (i < (NUM_LOOKBACK_DAYS + 1)) {
      candidateFiles.add(filePath.toString());
    }
  }
  properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, "2d1h");
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy-MM-dd-HH-mm");
  dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir3, properties,
      new Path("/tmp/src/ds2/daily"));
  fileStatusList = dataset.getFilesAtPath(fs, baseDir3, pathFilter);
  Assert.assertEquals(fileStatusList.size(), NUM_LOOKBACK_DAYS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
  // test ds of daily/yyyy-MM-dd-HH-mm-ss
  // Same as above, but with a random second component as well.
  datePattern = "yyyy-MM-dd-HH-mm-ss";
  formatter = DateTimeFormat.forPattern(datePattern);
  endDate = LocalDateTime.now(DateTimeZone.forID(TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
  candidateFiles = new HashSet<>();
  for (int i = 0; i < MAX_NUM_DAILY_DIRS; i++) {
    String startDate = endDate.minusDays(i).withMinuteOfHour(random.nextInt(60)).withSecondOfMinute(random.nextInt(60)).toString(formatter);
    if (i == 0) {
      // avoid future dates on minutes, so have consistency test result
      startDate = endDate.minusHours(i).withMinuteOfHour(0).withSecondOfMinute(0).toString(formatter);
    }
    Path subDirPath = new Path(baseDir4, new Path(startDate));
    fs.mkdirs(subDirPath);
    Path filePath = new Path(subDirPath, i + ".avro");
    fs.create(filePath);
    if (i < (NUM_LOOKBACK_DAYS + 1)) {
      candidateFiles.add(filePath.toString());
    }
  }
  properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, "2d1h");
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy-MM-dd-HH-mm-ss");
  dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir4, properties,
      new Path("/tmp/src/ds3/daily"));
  fileStatusList = dataset.getFilesAtPath(fs, baseDir4, pathFilter);
  Assert.assertEquals(fileStatusList.size(), NUM_LOOKBACK_DAYS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
}
/**
 * Verifies that an explicit {@code date.pattern.timezone} of UTC is honored: hourly
 * directories are created with UTC timestamps and the "4h" lookback must match them.
 * NOTE(review): this test writes into the same {@code baseDir1} tree as other tests
 * in this class — it relies on directory names overlapping rather than a clean slate.
 */
@Test
public void testTimezoneProperty() throws IOException {
  // Test in UTC instead of default time
  String datePattern = "yyyy/MM/dd/HH";
  DateTimeFormatter formatter = DateTimeFormat.forPattern(datePattern);
  // Ensure that the files are created in UTC time
  LocalDateTime endDate = LocalDateTime.now(DateTimeZone.forID("UTC"));
  Set<String> candidateFiles = new HashSet<>();
  for (int i = 0; i < MAX_NUM_HOURLY_DIRS; i++) {
    String startDate = endDate.minusHours(i).toString(formatter);
    Path subDirPath = new Path(baseDir1, new Path(startDate));
    fs.mkdirs(subDirPath);
    Path filePath = new Path(subDirPath, i + ".avro");
    fs.create(filePath);
    if (i < (NUM_LOOKBACK_HOURS + 1)) {
      candidateFiles.add(filePath.toString());
    }
  }
  //Lookback time = "4h"
  Properties properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_HOURS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd/HH");
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_TIMEZONE_KEY, "UTC");
  PathFilter pathFilter = new HiddenFilter();
  TimeAwareRecursiveCopyableDataset dataset = new TimeAwareRecursiveCopyableDataset(fs, baseDir1, properties,
      new Path("/tmp/src/*/hourly"));
  List<FileStatus> fileStatusList = dataset.getFilesAtPath(fs, baseDir1, pathFilter);
  Assert.assertEquals(fileStatusList.size(), NUM_LOOKBACK_HOURS + 1);
  for (FileStatus fileStatus: fileStatusList) {
    Assert.assertTrue(candidateFiles.contains(PathUtils.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString()));
  }
}
/**
 * Verifies that mismatched lookback granularity and date pattern are rejected at
 * construction time, for both combinations:
 * <ul>
 *   <li>daily pattern with a days+hours lookback, and</li>
 *   <li>hourly pattern with an hours+minutes lookback.</li>
 * </ul>
 *
 * <p>The original version relied solely on {@code expectedExceptions}: the first
 * constructor's throw ended the test, so the second combination was never exercised.
 * The first case is now checked with an explicit try/catch and the second throw
 * satisfies the {@code expectedExceptions} contract.
 */
@Test (expectedExceptions = IllegalArgumentException.class)
public void testInstantiationError() {
  //Daily directories, but look back time has days and hours. We should expect an assertion error.
  Properties properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_DAYS_HOURS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd");
  try {
    new TimeAwareRecursiveCopyableDataset(fs, baseDir2, properties, new Path("/tmp/src/*/daily"));
    Assert.fail("Expected IllegalArgumentException for a days+hours lookback with a daily date pattern");
  } catch (IllegalArgumentException expected) {
    // Expected; fall through to the second case.
  }
  // hourly directories, but look back time has hours and minutes. We should expect an assertion error.
  // This throw is the one consumed by expectedExceptions.
  properties = new Properties();
  properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_HOURS_MINS_STR);
  properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy-MM-dd-HH");
  new TimeAwareRecursiveCopyableDataset(fs, baseDir3, properties, new Path("/tmp/src/ds2/daily"));
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void testIllegalTimezoneProperty() throws IOException {
  // An unrecognized timezone id supplied via the date-pattern timezone property must
  // be rejected when the dataset is constructed.
  Properties props = new Properties();
  props.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_KEY, "yyyy/MM/dd/HH");
  props.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_TIMEZONE_KEY, "InvalidTimeZone");
  props.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_HOURS_STR);
  new TimeAwareRecursiveCopyableDataset(fs, baseDir3, props, new Path("/tmp/src/ds2/daily"));
}
@Test
public void testCheckPathDateTimeValidity() {
  // Exercises the static checkPathDateTimeValidity() range check at every directory
  // depth ("level"). Level 1 corresponds to an empty datePath and is handled inside
  // recursivelyGetFilesAtDatePath, so levels here start at 2.
  String hourlyPattern = "yyyy/MM/dd/HH";
  DateTimeFormatter hourlyFormatter = DateTimeFormat.forPattern(hourlyPattern);
  LocalDateTime rangeStart = LocalDateTime.parse("2022/11/30/23", hourlyFormatter);
  LocalDateTime rangeEnd = LocalDateTime.parse("2022/12/30/23", hourlyFormatter);

  // Every prefix of a 2023 path lies beyond the [rangeStart, rangeEnd] window.
  String[] pathsBeyondRange = {"2023", "2023/11", "2023/11/30", "2023/11/30/20"};
  for (int level = 2; level <= 5; level++) {
    Assert.assertFalse(TimeAwareRecursiveCopyableDataset
        .checkPathDateTimeValidity(rangeStart, rangeEnd, pathsBeyondRange[level - 2], hourlyPattern, level));
  }

  // 2022 prefixes are in range down to day granularity, but hour 20 on 2022/11/30
  // precedes the start hour (23), so the full-depth path is rejected.
  String[] pathsWithinRange = {"2022", "2022/11", "2022/11/30"};
  for (int level = 2; level <= 4; level++) {
    Assert.assertTrue(TimeAwareRecursiveCopyableDataset
        .checkPathDateTimeValidity(rangeStart, rangeEnd, pathsWithinRange[level - 2], hourlyPattern, level));
  }
  Assert.assertFalse(TimeAwareRecursiveCopyableDataset
      .checkPathDateTimeValidity(rangeStart, rangeEnd, "2022/11/30/20", hourlyPattern, 5));

  // Switch to a minute-granularity pattern; every prefix of this path is in range.
  String minutelyPattern = "yyyy/MM/dd/HH/mm";
  DateTimeFormatter minutelyFormatter = DateTimeFormat.forPattern(minutelyPattern);
  rangeStart = LocalDateTime.parse("2022/11/30/23/59", minutelyFormatter);
  rangeEnd = LocalDateTime.parse("2022/12/30/23/59", minutelyFormatter);
  String[] inRangeMinutePaths = {"2022", "2022/12", "2022/12/15", "2022/12/15/15", "2022/12/15/15/30"};
  for (int level = 2; level <= 6; level++) {
    Assert.assertTrue(TimeAwareRecursiveCopyableDataset
        .checkPathDateTimeValidity(rangeStart, rangeEnd, inRangeMinutePaths[level - 2], minutelyPattern, level));
  }

  // Paths that cannot be parsed against the date pattern are rejected outright.
  String[] unparseablePaths = {"test", "test/test", "test/test/test"};
  for (int level = 2; level <= 4; level++) {
    Assert.assertFalse(TimeAwareRecursiveCopyableDataset
        .checkPathDateTimeValidity(rangeStart, rangeEnd, unparseablePaths[level - 2], minutelyPattern, level));
  }
}
@AfterClass
public void clean() throws IOException {
  // Recursively remove every temporary directory created for these tests.
  Path[] tempDirs = {baseDir1, baseDir2, baseDir3, baseDir4};
  for (Path dir : tempDirs) {
    this.fs.delete(dir, true);
  }
}
} | 2,334 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/CopyableFileTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.util.PathUtils;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link CopyableFile}: serialization round-trips, lineage dataset
 * resolution via {@code setFsDatasets}, builder field population, and replicated
 * owner/permission resolution under various {@link PreserveAttributes} settings.
 */
public class CopyableFileTest {

  /**
   * Round-trips a fully-populated {@link CopyableFile} — including destination lineage
   * data ({@link PartitionDescriptor}) — through {@link CopyEntity#serialize} /
   * {@link CopyEntity#deserialize} and asserts equality.
   * NOTE(review): method name misspells "Deserialize"; renaming would be cosmetic only.
   */
  @Test
  public void testSerializeDeserialze() throws Exception {
    CopyableFile copyableFile =
        new CopyableFile(new FileStatus(10, false, 12, 100, 12345, new Path("/path")), new Path("/destination"),
            new OwnerAndPermission("owner", "group", FsPermission.getDefault()),
            Lists.newArrayList(new OwnerAndPermission("owner2", "group2", FsPermission.getDefault())),
            "checksum".getBytes(), PreserveAttributes.fromMnemonicString(""), "", 0, 0, Maps
            .<String, String>newHashMap(), "", null);
    DatasetDescriptor dataset = new DatasetDescriptor("hive", "db.table");
    PartitionDescriptor descriptor = new PartitionDescriptor("datepartition=2018/09/05", dataset);
    copyableFile.setDestinationData(descriptor);
    String s = CopyEntity.serialize(copyableFile);
    CopyEntity de = CopyEntity.deserialize(s);
    Assert.assertEquals(de, copyableFile);
  }

  /**
   * Serialization round-trip where several fields (origin, destination, an owner) are
   * null — the serializer must tolerate nulls and equality must still hold.
   */
  @Test
  public void testSerializeDeserialzeNulls() throws Exception {
    CopyableFile copyableFile =
        new CopyableFile(null, null, new OwnerAndPermission("owner", "group",
            FsPermission.getDefault()), Lists.newArrayList(new OwnerAndPermission(null, "group2", FsPermission
            .getDefault())), "checksum".getBytes(), PreserveAttributes.fromMnemonicString(""), "", 0, 0,
            Maps.<String, String>newHashMap(), "", null);
    String serialized = CopyEntity.serialize(copyableFile);
    CopyEntity deserialized = CopyEntity.deserialize(serialized);
    Assert.assertEquals(deserialized, copyableFile);
  }

  /** Round-trips a list of copy entities through serializeList/deserializeList. */
  @Test
  public void testSerializeDeserialzeList() throws Exception {
    List<CopyEntity> copyEntities =
        ImmutableList.<CopyEntity>of(CopyableFileUtils.getTestCopyableFile(), CopyableFileUtils.getTestCopyableFile(),
            CopyableFileUtils.getTestCopyableFile());
    String serialized = CopyEntity.serializeList(copyEntities);
    List<CopyEntity> deserialized = CopyEntity.deserializeList(serialized);
    Assert.assertEquals(deserialized, copyEntities);
  }

  /**
   * Verifies setFsDatasets() derives lineage {@link DatasetDescriptor}s from the
   * origin/target filesystems: for a regular file the dataset name is the parent
   * directory; for a directory it is the path itself. Platform comes from the
   * filesystem scheme and the fsUri is recorded in the descriptor metadata.
   */
  @Test
  public void testSetFsDatasets() throws URISyntaxException {
    FileSystem originFs = mock(FileSystem.class);
    String originFsUri = "hdfs://source.company.biz:2000";
    String originPath = "/data/databases/source/profile";
    when(originFs.getUri()).thenReturn(new URI(originFsUri));
    when(originFs.getScheme()).thenReturn("hdfs");
    FileSystem targetFs = mock(FileSystem.class);
    String targetFsUri = "file:///";
    String destinationPath = "/data/databases/destination/profile";
    when(targetFs.getUri()).thenReturn(new URI(targetFsUri));
    when(targetFs.getScheme()).thenReturn("file");
    // Test when source file is not a directory: dataset name is the parent dir.
    FileStatus origin = new FileStatus(0l, false, 0, 0l, 0l, new Path(originPath));
    CopyableFile copyableFile = new CopyableFile(origin, new Path(destinationPath), null, null, null,
        PreserveAttributes.fromMnemonicString(""), "", 0, 0, Maps.<String, String>newHashMap(), "", null);
    copyableFile.setFsDatasets(originFs, targetFs);
    DatasetDescriptor source = (DatasetDescriptor) copyableFile.getSourceData();
    Assert.assertEquals(source.getName(), "/data/databases/source");
    Assert.assertEquals(source.getPlatform(), "hdfs");
    Assert.assertEquals(source.getMetadata().get("fsUri"), originFsUri);
    DatasetDescriptor destination = (DatasetDescriptor) copyableFile.getDestinationData();
    Assert.assertEquals(destination.getName(), "/data/databases/destination");
    Assert.assertEquals(destination.getPlatform(), "file");
    Assert.assertEquals(destination.getMetadata().get("fsUri"), targetFsUri);
    // Test when source file is a directory: dataset name is the path itself, and
    // fully-qualified (scheme-prefixed) paths are normalized back to plain paths.
    originPath = originFsUri + originPath;
    destinationPath = targetFsUri + destinationPath;
    origin = new FileStatus(0l, true, 0, 0l, 0l, new Path(originPath));
    copyableFile = new CopyableFile(origin, new Path(destinationPath), null, null, null,
        PreserveAttributes.fromMnemonicString(""), "", 0, 0, Maps.<String, String>newHashMap(), "", null);
    copyableFile.setFsDatasets(originFs, targetFs);
    source = (DatasetDescriptor) copyableFile.getSourceData();
    Assert.assertEquals(source.getName(), "/data/databases/source/profile");
    Assert.assertEquals(source.getPlatform(), "hdfs");
    Assert.assertEquals(source.getMetadata().get("fsUri"), originFsUri);
    destination = (DatasetDescriptor) copyableFile.getDestinationData();
    Assert.assertEquals(destination.getName(), "/data/databases/destination/profile");
    Assert.assertEquals(destination.getPlatform(), "file");
    Assert.assertEquals(destination.getMetadata().get("fsUri"), targetFsUri);
  }

  /**
   * Builds a CopyableFile supplying only the required builder fields and verifies the
   * defaults: empty file set, empty checksum, destination owner/permission copied from
   * the origin (preserve "ugp"), and timestamps auto-derived from the origin's
   * modification time.
   */
  @Test
  public void testCopyableFileBuilderMinimumConfiguration()
      throws IOException {
    // Source
    String datasetRootDir = "/data/databases/source";
    Path datasetRoot = new Path(datasetRootDir);
    FileSystem originFS = FileSystem.getLocal(new Configuration());
    Path originFile = new Path(datasetRootDir, "copyableFile");
    FileStatus origin = new FileStatus(0l, false, 0, 0l, System.currentTimeMillis(), originFile);
    PreserveAttributes preserveAttributes = PreserveAttributes.fromMnemonicString("ugp");
    // Target
    String targetRoot = "/data/databases/destination";
    Path relativePath = PathUtils.relativizePath(originFile, datasetRoot);
    Path targetPath = new Path(targetRoot, relativePath);
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
    CopyConfiguration copyConfiguration =
        CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).preserve(preserveAttributes).build();
    CopyableFile copyableFile = CopyableFile.builder(originFS, origin, datasetRoot, copyConfiguration)
        .destination(targetPath)
        .ancestorsOwnerAndPermission(Lists.<OwnerAndPermission>newArrayList()) // not testing ancestors
        .build();
    // Making sure all fields are populated correctly via CopyableFile builder
    // Verify preserve attribute options
    Assert.assertEquals(copyableFile.getPreserve().toMnemonicString(), preserveAttributes.toMnemonicString());
    // Verify origin
    Assert.assertEquals(copyableFile.getFileSet(), "");
    Assert.assertEquals(copyableFile.getOrigin(), origin);
    // Verify destination target, permissions and other attributes
    Assert.assertEquals(copyableFile.getChecksum().length, 0);
    Assert.assertEquals(copyableFile.getDestination().toString(), targetPath.toString());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getGroup(), origin.getGroup());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getOwner(), origin.getOwner());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getFsPermission(),
        origin.getPermission());
    // Verify auto determined timestamp
    Assert.assertEquals(copyableFile.getOriginTimestamp(), origin.getModificationTime());
    Assert.assertEquals(copyableFile.getUpstreamTimestamp(), origin.getModificationTime());
  }

  /**
   * Builds a CopyableFile supplying every optional builder field and verifies the
   * explicit values win over the auto-derived defaults (file set name, checksum,
   * timestamps, destination owner/permission).
   */
  @Test
  public void testCopyableFileBuilderMaximumConfiguration()
      throws IOException {
    // Source
    String datasetRootDir = "/data/databases/source";
    Path datasetRoot = new Path(datasetRootDir);
    FileSystem originFS = FileSystem.getLocal(new Configuration());
    Path originFile = new Path(datasetRootDir, "copyableFile");
    FileStatus origin = new FileStatus(0l, false, 0, 0l, System.currentTimeMillis(), originFile);
    PreserveAttributes preserveAttributes = PreserveAttributes.fromMnemonicString("ugp");
    // Target
    String targetRoot = "/data/databases/destination";
    Path relativePath = PathUtils.relativizePath(originFile, datasetRoot);
    Path targetPath = new Path(targetRoot, relativePath);
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
    CopyConfiguration copyConfiguration =
        CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).preserve(preserveAttributes).build();
    // Other attributes
    String fileSet = "fileset";
    byte[] checksum = new byte[1];
    long originTimestamp = 23091986l;
    long upstreamTimestamp = 23091986l;
    OwnerAndPermission ownerAndPermission = new OwnerAndPermission("gobblin", "gobblin-dev", origin.getPermission());
    CopyableFile copyableFile = CopyableFile.builder(originFS, origin, datasetRoot, copyConfiguration)
        .fileSet(fileSet).checksum(checksum)
        .originTimestamp(originTimestamp).upstreamTimestamp(upstreamTimestamp)
        .destinationOwnerAndPermission(ownerAndPermission)
        .origin(origin)
        .preserve(preserveAttributes)
        .destination(targetPath)
        .ancestorsOwnerAndPermission(Lists.<OwnerAndPermission>newArrayList())
        .build();
    // Verify preserve attribute options
    Assert.assertEquals(copyableFile.getPreserve().toMnemonicString(), preserveAttributes.toMnemonicString());
    // Verify origin
    Assert.assertEquals(copyableFile.getFileSet(), fileSet);
    Assert.assertEquals(copyableFile.getOrigin(), origin);
    // Verify destination target, permissions and other attributes
    Assert.assertEquals(copyableFile.getChecksum().length, 1);
    Assert.assertEquals(copyableFile.getDestination().toString(), targetPath.toString());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getGroup(), ownerAndPermission.getGroup());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getOwner(), ownerAndPermission.getOwner());
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission().getFsPermission(),
        ownerAndPermission.getFsPermission());
    // Verify auto determined timestamp
    Assert.assertEquals(copyableFile.getOriginTimestamp(), originTimestamp);
    Assert.assertEquals(copyableFile.getUpstreamTimestamp(), upstreamTimestamp);
  }

  /**
   * Verifies resolveReplicatedOwnerAndPermission() under five configurations. Each
   * scenario's expectation is asserted inline:
   *  1. no preserve, no target group  -> owner/group/permission all null
   *  2. targetGroup only              -> only group is set (to the target group)
   *  3. preserve "ug" + targetGroup   -> owner preserved, target group wins over source group
   *  4. preserve "ug"                 -> owner and group preserved, permission null
   *  5. preserve "ugp"                -> owner, group and permission all preserved
   */
  @Test
  public void testResolveOwnerAndPermission() throws Exception {
    Path path = new Path("/test/path");
    FileStatus fileStatus = new FileStatus(1, false, 0, 0, 0, 0, FsPermission.getDefault(), "owner", "group", path);
    FileSystem fs = mock(FileSystem.class);
    Mockito.doReturn(fileStatus).when(fs).getFileStatus(path);
    Mockito.doReturn(path).when(fs).makeQualified(path);
    Mockito.doReturn(new URI("hdfs://uri")).when(fs).getUri();
    Properties properties = new Properties();
    properties.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/final/dir");
    OwnerAndPermission ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
        new CopyConfiguration.CopyConfigurationBuilder(fs, properties).build());
    Assert.assertEquals(ownerAndPermission.getOwner(), null);
    Assert.assertEquals(ownerAndPermission.getGroup(), null);
    Assert.assertEquals(ownerAndPermission.getFsPermission(), null);
    ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
        new CopyConfiguration.CopyConfigurationBuilder(fs, properties).targetGroup(Optional.of("target")).build());
    Assert.assertEquals(ownerAndPermission.getOwner(), null);
    Assert.assertEquals(ownerAndPermission.getGroup(), "target");
    Assert.assertEquals(ownerAndPermission.getFsPermission(), null);
    ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
        new CopyConfiguration.CopyConfigurationBuilder(fs, properties).targetGroup(Optional.of("target")).
            preserve(PreserveAttributes.fromMnemonicString("ug")).build());
    Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
    Assert.assertEquals(ownerAndPermission.getGroup(), "target");
    Assert.assertEquals(ownerAndPermission.getFsPermission(), null);
    ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
        new CopyConfiguration.CopyConfigurationBuilder(fs, properties).preserve(PreserveAttributes.fromMnemonicString("ug")).build());
    Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
    Assert.assertEquals(ownerAndPermission.getGroup(), "group");
    Assert.assertEquals(ownerAndPermission.getFsPermission(), null);
    ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
        new CopyConfiguration.CopyConfigurationBuilder(fs, properties).preserve(PreserveAttributes.fromMnemonicString("ugp")).build());
    Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
    Assert.assertEquals(ownerAndPermission.getGroup(), "group");
    Assert.assertEquals(ownerAndPermission.getFsPermission(), FsPermission.getDefault());
  }
}
| 2,335 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/CopySourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import com.google.common.base.Optional;
import com.google.common.collect.SetMultimap;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.watermark.CopyableFileWatermarkGenerator;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.request_allocation.RequestAllocatorConfig;
import org.apache.hadoop.fs.FileSystem;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Predicates;
import com.google.common.collect.Iterators;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.data.management.partition.CopyableDatasetRequestor;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.IterableDatasetFinderImpl;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.JobLauncherUtils;
import org.apache.gobblin.util.request_allocation.PriorityIterableBasedRequestAllocator;
@Slf4j
public class CopySourceTest {
@Test
public void testCopySource()
    throws Exception {
  // End-to-end listing: CopySource should emit one work unit per file discovered by
  // TestCopyableDatasetFinder, and all of them should share a single Extract.
  SourceState sourceState = new SourceState();
  sourceState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  sourceState.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY, TestCopyableDatasetFinder.class.getName());

  List<WorkUnit> flattened = JobLauncherUtils.flattenWorkUnits(new CopySource().getWorkunits(sourceState));
  Assert.assertEquals(flattened.size(), TestCopyableDataset.FILE_COUNT);

  Extract sharedExtract = flattened.get(0).getExtract();
  for (WorkUnit wu : flattened) {
    CopyableFile copyableFile = (CopyableFile) CopySource.deserializeCopyEntity(wu);
    // Each work unit carries a file from the test dataset with the expected metadata.
    Assert.assertTrue(copyableFile.getOrigin().getPath().toString().startsWith(TestCopyableDataset.ORIGIN_PREFIX));
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission(), TestCopyableDataset.OWNER_AND_PERMISSION);
    Assert.assertEquals(wu.getProp(ServiceConfigKeys.WORK_UNIT_SIZE), String.valueOf(TestCopyableDataset.FILE_LENGTH));
    Assert.assertEquals(wu.getExtract(), sharedExtract);
  }
}
@Test
public void testPartitionableDataset()
    throws Exception {
  // A partitionable dataset splits its files into two partitions around a threshold;
  // work units belonging to the same partition must share one Extract, and both
  // partitions must be represented in the output.
  SourceState sourceState = new SourceState();
  sourceState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  sourceState.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
      TestCopyablePartitionableDatasedFinder.class.getCanonicalName());

  List<WorkUnit> flattened = JobLauncherUtils.flattenWorkUnits(new CopySource().getWorkunits(sourceState));
  Assert.assertEquals(flattened.size(), TestCopyableDataset.FILE_COUNT);

  Extract aboveThresholdExtract = null;
  Extract belowThresholdExtract = null;
  for (WorkUnit wu : flattened) {
    CopyableFile copyableFile = (CopyableFile) CopySource.deserializeCopyEntity(wu);
    Assert.assertTrue(copyableFile.getOrigin().getPath().toString().startsWith(TestCopyableDataset.ORIGIN_PREFIX));
    Assert.assertEquals(copyableFile.getDestinationOwnerAndPermission(), TestCopyableDataset.OWNER_AND_PERMISSION);
    // The file name is its numeric index; it determines which partition it falls into.
    boolean belowThreshold =
        Integer.parseInt(copyableFile.getOrigin().getPath().getName()) < TestCopyablePartitionableDataset.THRESHOLD;
    if (belowThreshold) {
      if (belowThresholdExtract == null) {
        belowThresholdExtract = wu.getExtract();
      }
      Assert.assertEquals(wu.getExtract(), belowThresholdExtract);
    } else {
      if (aboveThresholdExtract == null) {
        aboveThresholdExtract = wu.getExtract();
      }
      Assert.assertEquals(wu.getExtract(), aboveThresholdExtract);
    }
  }
  Assert.assertNotNull(aboveThresholdExtract);
  Assert.assertNotNull(belowThresholdExtract);
}
/**
 * Configures a copy budget (50 bytes / 2 copy entities) too small for the partitions
 * produced by {@link TestCopyablePartitionableDatasedFinder}, invokes CopySource's
 * private {@code createRequestAllocator} via reflection, and verifies that with
 * STORE_REJECTED_REQUESTS=all both rejected file sets are retained with the expected
 * urn, entity count, and total size.
 */
@Test
public void testSubmitUnfulfilledRequestEvents()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  SourceState state = new SourceState();
  state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
      TestCopyablePartitionableDatasedFinder.class.getCanonicalName());
  state.setProp(CopySource.MAX_CONCURRENT_LISTING_SERVICES, 2);
  // Budget intentionally below what the test partitions require, so allocation rejects them.
  state.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".size", "50");
  state.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".copyEntities", 2);
  // Keep all rejected requests so they can be inspected below.
  state.setProp(CopyConfiguration.STORE_REJECTED_REQUESTS_KEY,
      RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name().toLowerCase());
  state.setProp(ConfigurationKeys.METRICS_CUSTOM_BUILDERS, "org.apache.gobblin.metrics.ConsoleEventReporterFactory");
  CopySource source = new CopySource();
  final FileSystem sourceFs = HadoopUtils.getSourceFileSystem(state);
  final FileSystem targetFs = HadoopUtils.getWriterFileSystem(state, 1, 0);
  int maxThreads = state
      .getPropAsInt(CopySource.MAX_CONCURRENT_LISTING_SERVICES, CopySource.DEFAULT_MAX_CONCURRENT_LISTING_SERVICES);
  final CopyConfiguration copyConfiguration = CopyConfiguration.builder(targetFs, state.getProperties()).build();
  MetricContext metricContext = Instrumented.getMetricContext(state, CopySource.class);
  EventSubmitter eventSubmitter = new EventSubmitter.Builder(metricContext, CopyConfiguration.COPY_PREFIX).build();
  DatasetsFinder<CopyableDatasetBase> datasetFinder = DatasetUtils
      .instantiateDatasetFinder(state.getProperties(), sourceFs, CopySource.DEFAULT_DATASET_PROFILE_CLASS_KEY,
          eventSubmitter, state);
  IterableDatasetFinder<CopyableDatasetBase> iterableDatasetFinder =
      datasetFinder instanceof IterableDatasetFinder ? (IterableDatasetFinder<CopyableDatasetBase>) datasetFinder
          : new IterableDatasetFinderImpl<>(datasetFinder);
  // Wrap each discovered dataset in a requestor; the factory may yield nulls, which are filtered out.
  Iterator<CopyableDatasetRequestor> requestorIteratorWithNulls = Iterators
      .transform(iterableDatasetFinder.getDatasetsIterator(),
          new CopyableDatasetRequestor.Factory(targetFs, copyConfiguration, log));
  Iterator<CopyableDatasetRequestor> requestorIterator =
      Iterators.filter(requestorIteratorWithNulls, Predicates.<CopyableDatasetRequestor>notNull());
  // createRequestAllocator is private on CopySource; reach it via reflection.
  Method m = CopySource.class.getDeclaredMethod("createRequestAllocator", CopyConfiguration.class, int.class);
  m.setAccessible(true);
  PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>> allocator =
      (PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>>) m.invoke(source, copyConfiguration, maxThreads);
  // The returned iterator is unused here; allocateRequests is invoked for its side
  // effect of populating the rejected-requests list inspected below.
  Iterator<FileSet<CopyEntity>> prioritizedFileSets =
      allocator.allocateRequests(requestorIterator, copyConfiguration.getMaxToCopy());
  List<FileSet<CopyEntity>> fileSetList = allocator.getRequestsExceedingAvailableResourcePool();
  Assert.assertEquals(fileSetList.size(), 2);
  FileSet<CopyEntity> fileSet = fileSetList.get(0);
  Assert.assertEquals(fileSet.getDataset().getUrn(), "/test");
  Assert.assertEquals(fileSet.getTotalEntities(), 5);
  Assert.assertEquals(fileSet.getTotalSizeInBytes(), 75);
  fileSet = fileSetList.get(1);
  Assert.assertEquals(fileSet.getDataset().getUrn(), "/test");
  Assert.assertEquals(fileSet.getTotalEntities(), 5);
  Assert.assertEquals(fileSet.getTotalSizeInBytes(), 75);
}
@Test(expectedExceptions = IOException.class)
public void testFailIfAllAllocationRequestsRejected()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  // Configure a copy budget (50 bytes / 2 copy entities) too small for either file
  // set produced by the test dataset finder, so every request is rejected and
  // failJobIfAllRequestsRejected() must raise the expected IOException.
  SourceState sourceState = new SourceState();
  sourceState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  sourceState.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
      TestCopyablePartitionableDatasedFinder.class.getCanonicalName());
  sourceState.setProp(CopySource.MAX_CONCURRENT_LISTING_SERVICES, 2);
  sourceState.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".size", "50");
  sourceState.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".copyEntities", "2");
  sourceState.setProp(CopyConfiguration.STORE_REJECTED_REQUESTS_KEY,
      RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name().toLowerCase());
  sourceState.setProp(ConfigurationKeys.METRICS_CUSTOM_BUILDERS, "org.apache.gobblin.metrics.ConsoleEventReporterFactory");

  CopySource copySource = new CopySource();
  final FileSystem srcFs = HadoopUtils.getSourceFileSystem(sourceState);
  final FileSystem destFs = HadoopUtils.getWriterFileSystem(sourceState, 1, 0);
  final CopyConfiguration copyConfig = CopyConfiguration.builder(destFs, sourceState.getProperties()).build();
  int listingThreads = sourceState
      .getPropAsInt(CopySource.MAX_CONCURRENT_LISTING_SERVICES, CopySource.DEFAULT_MAX_CONCURRENT_LISTING_SERVICES);

  MetricContext metricContext = Instrumented.getMetricContext(sourceState, CopySource.class);
  EventSubmitter eventSubmitter = new EventSubmitter.Builder(metricContext, CopyConfiguration.COPY_PREFIX).build();
  DatasetsFinder<CopyableDatasetBase> finder = DatasetUtils
      .instantiateDatasetFinder(sourceState.getProperties(), srcFs, CopySource.DEFAULT_DATASET_PROFILE_CLASS_KEY,
          eventSubmitter, sourceState);
  IterableDatasetFinder<CopyableDatasetBase> iterableFinder;
  if (finder instanceof IterableDatasetFinder) {
    iterableFinder = (IterableDatasetFinder<CopyableDatasetBase>) finder;
  } else {
    iterableFinder = new IterableDatasetFinderImpl<>(finder);
  }
  // Wrap datasets in requestors, dropping any nulls the factory produces.
  Iterator<CopyableDatasetRequestor> datasetRequestors = Iterators.filter(
      Iterators.transform(iterableFinder.getDatasetsIterator(),
          new CopyableDatasetRequestor.Factory(destFs, copyConfig, log)),
      Predicates.<CopyableDatasetRequestor>notNull());

  // createRequestAllocator is private on CopySource; reach it via reflection.
  Method createAllocator = CopySource.class.getDeclaredMethod("createRequestAllocator", CopyConfiguration.class, int.class);
  createAllocator.setAccessible(true);
  PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>> allocator =
      (PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>>) createAllocator.invoke(copySource, copyConfig, listingThreads);
  Iterator<FileSet<CopyEntity>> prioritizedFileSets =
      allocator.allocateRequests(datasetRequestors, copyConfig.getMaxToCopy());
  List<FileSet<CopyEntity>> rejected = allocator.getRequestsExceedingAvailableResourcePool();
  Assert.assertEquals(rejected.size(), 2);

  // Every request was rejected, so this call must throw.
  copySource.failJobIfAllRequestsRejected(allocator, prioritizedFileSets);
}
@Test
public void testPassIfNoAllocationsRejected()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  // With a generous copy budget (100 bytes / 10 copy entities) nothing is rejected,
  // so failJobIfAllRequestsRejected() must complete without throwing.
  SourceState sourceState = new SourceState();
  sourceState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  sourceState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  sourceState.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
      TestCopyablePartitionableDatasedFinder.class.getCanonicalName());
  sourceState.setProp(CopySource.MAX_CONCURRENT_LISTING_SERVICES, 2);
  sourceState.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".size", "100");
  sourceState.setProp(CopyConfiguration.MAX_COPY_PREFIX + ".copyEntities", "10");
  sourceState.setProp(CopyConfiguration.STORE_REJECTED_REQUESTS_KEY,
      RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name().toLowerCase());
  sourceState.setProp(ConfigurationKeys.METRICS_CUSTOM_BUILDERS, "org.apache.gobblin.metrics.ConsoleEventReporterFactory");

  CopySource copySource = new CopySource();
  final FileSystem srcFs = HadoopUtils.getSourceFileSystem(sourceState);
  final FileSystem destFs = HadoopUtils.getWriterFileSystem(sourceState, 1, 0);
  final CopyConfiguration copyConfig = CopyConfiguration.builder(destFs, sourceState.getProperties()).build();
  int listingThreads = sourceState
      .getPropAsInt(CopySource.MAX_CONCURRENT_LISTING_SERVICES, CopySource.DEFAULT_MAX_CONCURRENT_LISTING_SERVICES);

  MetricContext metricContext = Instrumented.getMetricContext(sourceState, CopySource.class);
  EventSubmitter eventSubmitter = new EventSubmitter.Builder(metricContext, CopyConfiguration.COPY_PREFIX).build();
  DatasetsFinder<CopyableDatasetBase> finder = DatasetUtils
      .instantiateDatasetFinder(sourceState.getProperties(), srcFs, CopySource.DEFAULT_DATASET_PROFILE_CLASS_KEY,
          eventSubmitter, sourceState);
  IterableDatasetFinder<CopyableDatasetBase> iterableFinder;
  if (finder instanceof IterableDatasetFinder) {
    iterableFinder = (IterableDatasetFinder<CopyableDatasetBase>) finder;
  } else {
    iterableFinder = new IterableDatasetFinderImpl<>(finder);
  }
  // Wrap datasets in requestors, dropping any nulls the factory produces.
  Iterator<CopyableDatasetRequestor> datasetRequestors = Iterators.filter(
      Iterators.transform(iterableFinder.getDatasetsIterator(),
          new CopyableDatasetRequestor.Factory(destFs, copyConfig, log)),
      Predicates.<CopyableDatasetRequestor>notNull());

  // createRequestAllocator is private on CopySource; reach it via reflection.
  Method createAllocator = CopySource.class.getDeclaredMethod("createRequestAllocator", CopyConfiguration.class, int.class);
  createAllocator.setAccessible(true);
  PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>> allocator =
      (PriorityIterableBasedRequestAllocator<FileSet<CopyEntity>>) createAllocator.invoke(copySource, copyConfig, listingThreads);
  Iterator<FileSet<CopyEntity>> prioritizedFileSets =
      allocator.allocateRequests(datasetRequestors, copyConfig.getMaxToCopy());
  List<FileSet<CopyEntity>> rejected = allocator.getRequestsExceedingAvailableResourcePool();
  Assert.assertEquals(rejected.size(), 0);

  // No rejections, so this must return normally.
  copySource.failJobIfAllRequestsRejected(allocator, prioritizedFileSets);
}
/**
 * Verifies that when dataset-local work directories are enabled, the work units produced for each
 * Hive table are staged under the configured temp-dir root, i.e.
 * {@code <tempDirRoot>/targetPath/<db>/<table>}.
 */
@Test
public void testDefaultHiveDatasetShardTempPaths()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  SourceState state = new SourceState();
  File tempDir = Files.createTempDir();
  String tempDirRoot = tempDir.getPath();
  // NOTE(review): deleteOnExit() does not remove non-empty directories — confirm cleanup is adequate.
  tempDir.deleteOnExit();
  state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  state.setProp("hive.dataset.whitelist", "testDB.table*"); // using a mock class so the finder will always find 3 tables regardless of this setting
  state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY, MockHiveDatasetFinder.class.getName());
  state.setProp(ConfigurationKeys.USE_DATASET_LOCAL_WORK_DIR, "true");
  state.setProp("tempDirRoot", tempDirRoot);
  state.setProp(CopyConfiguration.STORE_REJECTED_REQUESTS_KEY,
      RequestAllocatorConfig.StoreRejectedRequestsConfig.ALL.name().toLowerCase());
  state.setProp(ConfigurationKeys.JOB_NAME_KEY, "jobName");
  state.setProp(ConfigurationKeys.JOB_ID_KEY, "jobId");
  CopySource source = new CopySource();
  List<WorkUnit> workunits = source.getWorkunits(state);
  workunits = JobLauncherUtils.flattenWorkUnits(workunits);
  Assert.assertEquals(workunits.size(), 6); // workunits are created for pre and post publish steps
  // workunits are not guaranteed to be created in any order, remove duplicate paths
  Set<String> datasetPaths = workunits.stream()
      .map(w -> w.getProp(ConfigurationKeys.DATASET_DESTINATION_PATH))
      .collect(Collectors.toSet());
  for (int i = 0; i < 3; i++) {
    // One dataset-local destination path is expected per mocked table (table0..table2).
    Assert.assertTrue(datasetPaths.contains(tempDirRoot + "/targetPath/testDB/table" + i));
  }
}
/**
 * Verifies fail-fast behavior: with {@code WORK_UNIT_GENERATOR_FAILURE_IS_FATAL} enabled and a
 * generator that always throws, {@link CopySource#getWorkunits} must propagate a
 * {@link RuntimeException} instead of returning work units.
 */
@Test (expectedExceptions = RuntimeException.class)
public void testGetWorkUnitsExecutionFastFailure() {
  SourceState state = new SourceState();
  state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
  state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
  state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
  state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
      TestCopyablePartitionableDatasedFinder.class.getCanonicalName());
  state.setProp(ConfigurationKeys.COPY_SOURCE_FILESET_WU_GENERATOR_CLASS,
      AlwaysThrowsMockedFileSetWorkUnitGenerator.class.getName());
  state.setProp(ConfigurationKeys.WORK_UNIT_GENERATOR_FAILURE_IS_FATAL,
      ConfigurationKeys.DEFAULT_WORK_UNIT_FAST_FAIL_ENABLED);
  CopySource source = new CopySource();
  // Throws the runtime exception after encountering a failure generating the work units.
  // (An assertion after this call would be unreachable, so none is made.)
  source.getWorkunits(state);
}
/**
 * Test {@link CopySource.FileSetWorkUnitGenerator} whose {@link #call()} always fails; used by
 * {@code testGetWorkUnitsExecutionFastFailure} to exercise the fatal work-unit-generation path.
 */
class AlwaysThrowsMockedFileSetWorkUnitGenerator extends CopySource.FileSetWorkUnitGenerator {
  // Delegates all construction to the parent generator; this subclass holds no state of its own.
  public AlwaysThrowsMockedFileSetWorkUnitGenerator(CopyableDatasetBase copyableDataset, FileSet<CopyEntity> fileSet, State state,
      FileSystem targetFs, SetMultimap<FileSet<CopyEntity>, WorkUnit> workUnitList,
      Optional<CopyableFileWatermarkGenerator> watermarkGenerator, long minWorkUnitWeight,
      Optional<LineageInfo> lineageInfo) {
    super(copyableDataset, fileSet, state, targetFs, workUnitList, watermarkGenerator, minWorkUnitWeight,
        lineageInfo);
  }

  @Override
  public Void call(){
    // Unconditionally fail so callers can verify how a fatal generation failure is handled.
    throw new RuntimeException("boom!");
  }
}
}
| 2,336 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/PreserveAttributesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.data.management.copy.PreserveAttributes.Option;
import java.util.Map;
import java.util.Set;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
 * Unit tests for {@link PreserveAttributes} mnemonic-string parsing and round-tripping.
 */
public class PreserveAttributesTest {

  @Test
  public void test() {
    // Maps a mnemonic string to the exact set of options it should enable.
    Map<String, Set<PreserveAttributes.Option>> tests = Maps.newHashMap();
    tests.put("r", Sets.newHashSet(Option.REPLICATION));
    tests.put("b", Sets.newHashSet(Option.BLOCK_SIZE));
    tests.put("u", Sets.newHashSet(Option.OWNER));
    tests.put("g", Sets.newHashSet(Option.GROUP));
    tests.put("p", Sets.newHashSet(Option.PERMISSION));
    tests.put("ru", Sets.newHashSet(Option.REPLICATION, Option.OWNER));
    tests.put("rbugp", Sets.newHashSet(Option.REPLICATION, Option.OWNER, Option.BLOCK_SIZE, Option.GROUP,
        Option.PERMISSION));
    // Repeated characters are tolerated and equivalent to a single occurrence.
    tests.put("rrr", Sets.newHashSet(Option.REPLICATION));
    tests.put("rrb", Sets.newHashSet(Option.REPLICATION, Option.BLOCK_SIZE));
    tests.put("", Sets.<Option>newHashSet());

    for (Map.Entry<String, Set<PreserveAttributes.Option>> entry : tests.entrySet()) {
      PreserveAttributes preserve = PreserveAttributes.fromMnemonicString(entry.getKey());
      for (Option option : Option.values()) {
        Assert.assertEquals(preserve.preserve(option), entry.getValue().contains(option));
      }
      // Round trip: serializing back to a mnemonic string must parse to an equal instance.
      Assert.assertEquals(preserve, PreserveAttributes.fromMnemonicString(preserve.toMnemonicString()));
    }
  }

  @Test
  public void testInvalidStrings() {
    assertInvalidMnemonic("x");
    assertInvalidMnemonic("rx");
    assertInvalidMnemonic("ugq");
  }

  /** Asserts that parsing {@code mnemonic} fails with {@link IllegalArgumentException}. */
  private static void assertInvalidMnemonic(String mnemonic) {
    try {
      PreserveAttributes.fromMnemonicString(mnemonic);
      Assert.fail("Expected IllegalArgumentException for mnemonic: " + mnemonic);
    } catch (IllegalArgumentException iae) {
      // Expected exception
    }
  }
}
| 2,337 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TestCopyablePartitionableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.hadoop.fs.FileStatus;
/**
 * A partitionable test dataset: each file's name is parsed as an integer and the file is routed
 * into the {@link #BELOW} file set when that number is under {@link #THRESHOLD}, otherwise into
 * {@link #ABOVE}.
 */
public class TestCopyablePartitionableDataset extends TestCopyableDataset {

  public static final String BELOW = "below";
  public static final String ABOVE = "above";
  public static final int THRESHOLD = TestCopyableDataset.FILE_COUNT / 2;

  @Override protected void modifyCopyableFile(CopyableFile.Builder builder, FileStatus origin) {
    super.modifyCopyableFile(builder, origin);
    // The file name itself encodes the partition key.
    int fileNumber = Integer.parseInt(origin.getPath().getName());
    builder.fileSet(fileNumber < THRESHOLD ? BELOW : ABOVE);
  }
}
| 2,338 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/CopyableDatasetMetadataTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
/**
 * Tests serialization round-trip and hash-based equality of {@link CopyableDatasetMetadata}.
 */
public class CopyableDatasetMetadataTest {

  @Test
  public void testSerializeDeserialize() throws Exception {
    CopyableDataset copyableDataset = new TestCopyableDataset();
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(copyableDataset);
    String serialized = metadata.serialize();
    CopyableDatasetMetadata deserialized = CopyableDatasetMetadata.deserialize(serialized);
    // The dataset URN must survive the serialize/deserialize round trip.
    Assert.assertEquals(copyableDataset.datasetURN(), deserialized.getDatasetURN());
  }

  @Test
  public void testHashCode() throws Exception {
    CopyableDataset copyableDataset = new TestCopyableDataset();
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(copyableDataset);
    String serialized = metadata.serialize();
    // Two independently deserialized copies must be equal and hash to the same multimap key.
    CopyableDatasetMetadata deserialized = CopyableDatasetMetadata.deserialize(serialized);
    CopyableDatasetMetadata deserialized2 = CopyableDatasetMetadata.deserialize(serialized);
    Multimap<CopyableDatasetMetadata, WorkUnitState> datasetRoots = ArrayListMultimap.create();
    datasetRoots.put(deserialized, new WorkUnitState());
    datasetRoots.put(deserialized2, new WorkUnitState());
    Assert.assertEquals(datasetRoots.keySet().size(), 1);
  }
}
| 2,339 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/UnixTimestampRecursiveCopyableDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDateTime;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests for {@link UnixTimestampRecursiveCopyableDataset}: verifies selection of dataset versions
 * whose directory names embed a unix timestamp (e.g. {@code 1570544993735-PT-499913495}) under the
 * ALL / EARLIEST / LATEST version-selection policies, for both database-level and table-level
 * dataset roots.
 *
 * <p>NOTE: the entire class is currently disabled ({@code enabled = false}).
 */
@Test(enabled = false)
public class UnixTimestampRecursiveCopyableDatasetTest {

  String rootPath = "/tmp/src";
  String databaseName = "dbName";
  String tableName = "tableName";
  String sourceDir = rootPath + "/" + databaseName + "/" + tableName;

  private Path baseSrcDir;
  private FileSystem fs;
  private Path baseDstDir;

  private static final String NUM_LOOKBACK_DAYS_STR = "2d";
  private static final Integer MAX_NUM_DAILY_DIRS = 4;
  private static final Integer NUM_DIRS_PER_DAY = 5;
  private static final Integer NUM_FILES_PER_DIR = 3;

  /** Creates fresh, empty source and destination directories on the local file system. */
  @BeforeClass
  public void setUp()
      throws IOException {
    this.fs = FileSystem.getLocal(new Configuration());
    baseSrcDir = new Path(sourceDir);
    if (fs.exists(baseSrcDir)) {
      fs.delete(baseSrcDir, true);
    }
    fs.mkdirs(baseSrcDir);
    baseDstDir = new Path("/tmp/dst/dataset1/");
    if (fs.exists(baseDstDir)) {
      fs.delete(baseDstDir, true);
    }
    fs.mkdirs(baseDstDir);
  }

  @Test(enabled = false)
  public void testGetFilesAtPath()
      throws IOException {
    // Lay out MAX_NUM_DAILY_DIRS days x NUM_DIRS_PER_DAY version dirs x NUM_FILES_PER_DIR files,
    // with version directory names like "1570544993735-PT-499913495".
    LocalDateTime endDate =
        LocalDateTime.now(DateTimeZone.forID(TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE));
    for (int i = 0; i < MAX_NUM_DAILY_DIRS; i++) {
      for (int j = 0; j < NUM_DIRS_PER_DAY; j++) {
        Path subDirPath =
            new Path(baseSrcDir, new Path(endDate.toDateTime().plusSeconds(60).getMillis() + "-PT-100000"));
        fs.mkdirs(subDirPath);
        for (int k = 0; k < NUM_FILES_PER_DIR; k++) {
          Path filePath = new Path(subDirPath, k + ".avro");
          // Close the stream immediately so the (empty) file is flushed and not leaked.
          fs.create(filePath).close();
        }
        endDate = endDate.minusMinutes(10);
      }
      endDate = endDate.minusDays(1);
    }
    PathFilter ACCEPT_ALL_PATH_FILTER = new PathFilter() {
      @Override
      public boolean accept(Path path) {
        return true;
      }
    };

    //
    // Test db level copy, Qualifying Regex: ".*([0-9]{13})-PT-.*/.*", dataset root = /tmp/src/databaseName
    //
    Properties properties = new Properties();
    properties.setProperty("gobblin.dataset.pattern", sourceDir);
    properties.setProperty(TimeAwareRecursiveCopyableDataset.DATE_PATTERN_TIMEZONE_KEY,
        TimeAwareRecursiveCopyableDataset.DEFAULT_DATE_PATTERN_TIMEZONE);
    properties.setProperty(TimeAwareRecursiveCopyableDataset.LOOKBACK_TIME_KEY, NUM_LOOKBACK_DAYS_STR);
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "ALL");
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.TIMESTAMP_REGEEX, ".*/([0-9]{13})-PT-.*/.*");
    UnixTimestampCopyableDatasetFinder finder = new UnixTimestampCopyableDatasetFinder(fs, properties);

    // Version selection policy = ALL: everything inside the lookback window is selected.
    String datasetRoot = rootPath + "/" + databaseName;
    UnixTimestampRecursiveCopyableDataset dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    List<FileStatus> fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 30);

    // Version selection policy = EARLIEST: only the earliest version per day survives.
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "EARLIEST");
    finder = new UnixTimestampCopyableDatasetFinder(fs, properties);
    dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 6);

    // Version selection policy = LATEST (policy name is case-insensitive).
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "latest");
    finder = new UnixTimestampCopyableDatasetFinder(fs, properties);
    dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 6);

    //
    // Test table level copy, Qualifying Regex: "([0-9]{13})-PT-.*/.*", dataset root = /tmp/src/databaseName/tableName
    //
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.TIMESTAMP_REGEEX, "([0-9]{13})-PT-.*/.*");
    finder = new UnixTimestampCopyableDatasetFinder(fs, properties);
    datasetRoot = rootPath + "/" + databaseName + "/" + tableName;

    // Version selection policy = ALL
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "ALL");
    dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 30);

    // Version selection policy = EARLIEST
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "EARLIEST");
    finder = new UnixTimestampCopyableDatasetFinder(fs, properties);
    dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 6);

    // Version selection policy = LATEST
    properties.setProperty(UnixTimestampRecursiveCopyableDataset.VERSION_SELECTION_POLICY, "latest");
    finder = new UnixTimestampCopyableDatasetFinder(fs, properties);
    dataset = (UnixTimestampRecursiveCopyableDataset) finder.datasetAtPath(new Path(datasetRoot));
    fileStatusList = dataset.getFilesAtPath(fs, baseSrcDir, ACCEPT_ALL_PATH_FILTER);
    Assert.assertEquals(fileStatusList.size(), 6);
  }

  /**
   * Verifies that the db-level and table-level regexes both capture the 13-digit unix timestamp
   * embedded in a version directory name.
   */
  @Test(enabled = false)
  public void testRegex() {
    String dbRegex = ".*/([0-9]{13}).*/.*";
    long now = System.currentTimeMillis();
    String path = "tableName/" + now + "-PT-12345/part1.avro";
    Pattern pattern = Pattern.compile(dbRegex);
    Matcher matcher = pattern.matcher(path);
    Assert.assertTrue(matcher.matches());
    Assert.assertEquals(Long.parseLong(matcher.group(1)), now);

    String tableRegex = "([0-9]{13}).*/.*";
    path = now + "-PT-12345/part1.avro";
    pattern = Pattern.compile(tableRegex);
    matcher = pattern.matcher(path);
    Assert.assertTrue(matcher.matches());
    Assert.assertEquals(Long.parseLong(matcher.group(1)), now);
  }

  /** Deletes the temporary source and destination directories. */
  @AfterClass
  public void clean()
      throws IOException {
    this.fs.delete(baseSrcDir, true);
    this.fs.delete(baseDstDir, true);
  }
}
| 2,340 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/TimestampBasedCopyableDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.Dataset;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Test for {@link TimestampBasedCopyableDataset}.
*/
@Test(groups = {"gobblin.data.management.copy"})
public class TimestampBasedCopyableDatasetTest {
private FileSystem localFs;
@BeforeTest
public void before()
throws IOException {
this.localFs = FileSystem.getLocal(new Configuration());
}
/**
* Test the {@link TimestampBasedCopyableDataset} constructor with different config options.
*/
@Test
public void testConfigOptions() {
Properties props = new Properties();
props.put(TimestampBasedCopyableDataset.COPY_POLICY, TimeBasedCopyPolicyForTest.class.getName());
props.put(TimestampBasedCopyableDataset.DATASET_VERSION_FINDER,
TimestampedDatasetVersionFinderForTest.class.getName());
TimestampBasedCopyableDataset copyabledataset1 =
new TimestampBasedCopyableDataset(localFs, props, new Path("dummy"));
Assert.assertEquals(copyabledataset1.getDatasetVersionFinder().getClass().getName(),
TimestampedDatasetVersionFinderForTest.class.getName());
Assert.assertEquals(copyabledataset1.getVersionSelectionPolicy().getClass().getName(),
TimeBasedCopyPolicyForTest.class.getName());
// Change the version finder
props.put(TimestampBasedCopyableDataset.DATASET_VERSION_FINDER, VersionFinderDoNothingForTest.class.getName());
TimestampBasedCopyableDataset copyabledataset2 =
new TimestampBasedCopyableDataset(localFs, props, new Path("dummy"));
Assert.assertEquals(copyabledataset2.getDatasetVersionFinder().getClass().getName(),
VersionFinderDoNothingForTest.class.getName());
Assert.assertEquals(copyabledataset2.getVersionSelectionPolicy().getClass().getName(),
TimeBasedCopyPolicyForTest.class.getName());
}
/**
* Test {@link TimestampBasedCopyableDataset.CopyableFileGenerator}'s logic to determine copyable files.
*/
@Test
public void testIsCopyableFile()
throws IOException, InterruptedException {
Path testRoot = new Path("testCopyableFileGenerator");
Path srcRoot = new Path(testRoot, "datasetRoot");
String versionDir = "dummyversion";
Path versionPath = new Path(srcRoot, versionDir);
Path targetDir = new Path(testRoot, "target");
if (this.localFs.exists(testRoot)) {
this.localFs.delete(testRoot, true);
}
this.localFs.mkdirs(versionPath);
Path srcfile = new Path(versionPath, "file1");
this.localFs.create(srcfile);
this.localFs.mkdirs(targetDir);
Properties props = new Properties();
props.put(TimestampBasedCopyableDataset.COPY_POLICY, TimeBasedCopyPolicyForTest.class.getName());
props.put(TimestampBasedCopyableDataset.DATASET_VERSION_FINDER,
TimestampedDatasetVersionFinderForTest.class.getName());
Path datasetRootPath = this.localFs.getFileStatus(srcRoot).getPath();
TimestampBasedCopyableDataset copyabledataset = new TimestampBasedCopyableDataset(localFs, props, datasetRootPath);
TimestampedDatasetVersion srcVersion = new TimestampedDatasetVersion(new DateTime(), versionPath);
class SimpleCopyableFileGenerator extends TimestampBasedCopyableDataset.CopyableFileGenerator {
public SimpleCopyableFileGenerator(TimestampBasedCopyableDataset copyableDataset, FileSystem srcFs,
FileSystem targetFs, CopyConfiguration configuration, TimestampedDatasetVersion copyableVersion,
ConcurrentLinkedQueue<CopyableFile> copyableFileList) {
super(srcFs, targetFs, configuration, copyableDataset.datasetRoot(), configuration.getPublishDir(),
copyableVersion.getDateTime(), copyableVersion.getPaths(), copyableFileList,
copyableDataset.copyableFileFilter());
}
@Override
protected CopyableFile generateCopyableFile(FileStatus singleFile, Path targetPath, long timestampFromPath,
Path locationToCopy)
throws IOException {
CopyableFile mockCopyableFile = mock(CopyableFile.class);
when(mockCopyableFile.getFileSet()).thenReturn(singleFile.getPath().toString());
return mockCopyableFile;
}
}
// When srcFile exists on src but not on target, srcFile should be included in the copyableFileList.
CopyConfiguration configuration1 = mock(CopyConfiguration.class);
when(configuration1.getPublishDir()).thenReturn(localFs.getFileStatus(targetDir).getPath());
ConcurrentLinkedQueue<CopyableFile> copyableFileList1 = new ConcurrentLinkedQueue<>();
TimestampBasedCopyableDataset.CopyableFileGenerator copyFileGenerator1 =
new SimpleCopyableFileGenerator(copyabledataset, localFs, localFs, configuration1, srcVersion,
copyableFileList1);
copyFileGenerator1.run();
Assert.assertEquals(copyableFileList1.size(), 1);
Assert.assertEquals(copyableFileList1.poll().getFileSet(), localFs.getFileStatus(srcfile).getPath().toString());
// When files exist on both locations but with different timestamp, the result should only include newer src files.
String noNeedToCopyFile = "file2";
Path oldSrcFile = new Path(versionPath, noNeedToCopyFile);
this.localFs.create(oldSrcFile);
Thread.sleep(100);
Path newTargetfile = new Path(targetDir, new Path(versionDir, noNeedToCopyFile));
this.localFs.create(newTargetfile);
copyFileGenerator1.run();
Assert.assertEquals(copyableFileList1.size(), 1);
Assert.assertEquals(copyableFileList1.poll().getFileSet(), localFs.getFileStatus(srcfile).getPath().toString());
// When srcFile exists on both locations and have the same modified timestamp, it should not be included in copyableFileList.
CopyConfiguration configuration2 = mock(CopyConfiguration.class);
when(configuration2.getPublishDir()).thenReturn(localFs.getFileStatus(datasetRootPath).getPath());
ConcurrentLinkedQueue<CopyableFile> copyableFileList2 = new ConcurrentLinkedQueue<>();
TimestampBasedCopyableDataset.CopyableFileGenerator copyFileGenerator2 =
new SimpleCopyableFileGenerator(copyabledataset, localFs, localFs, configuration2, srcVersion,
copyableFileList2);
copyFileGenerator2.run();
Assert.assertEquals(copyableFileList2.size(), 0);
this.localFs.delete(testRoot, true);
}
/**
* Test {@link TimestampBasedCopyableDataset.CopyableFileGenerator} when src location is empty and also when it is null.
*/
@Test(expectedExceptions = RuntimeException.class)
public void testCopyableFileGenerator() {
Properties props = new Properties();
props.put(TimestampBasedCopyableDataset.COPY_POLICY, TimeBasedCopyPolicyForTest.class.getName());
props.put(TimestampBasedCopyableDataset.DATASET_VERSION_FINDER,
TimestampedDatasetVersionFinderForTest.class.getName());
TimestampBasedCopyableDataset copyabledataset =
new TimestampBasedCopyableDataset(localFs, props, new Path("dummy"));
CopyConfiguration configuration = mock(CopyConfiguration.class);
when(configuration.getPublishDir()).thenReturn(new Path("publishDir"));
ConcurrentLinkedQueue<CopyableFile> copyableFileList = new ConcurrentLinkedQueue<>();
// The src path is empty.
TimestampedDatasetVersion emptyVersion = new TimestampedDatasetVersion(new DateTime(), new Path("dummy2"));
TimestampBasedCopyableDataset.CopyableFileGenerator emptyGenerator =
copyabledataset.getCopyableFileGenetator(localFs, configuration, emptyVersion, copyableFileList);
emptyGenerator.run();
Assert.assertEquals(copyableFileList.size(), 0);
// The src path is null.
TimestampedDatasetVersion versionHasNullPath = new TimestampedDatasetVersion(new DateTime(), null);
TimestampBasedCopyableDataset.CopyableFileGenerator exceptionGenerator =
copyabledataset.getCopyableFileGenetator(localFs, configuration, versionHasNullPath, copyableFileList);
exceptionGenerator.run();
}
/**
* Test the parallel execution to get copyable files in {@link TimestampBasedCopyableDataset#getCopyableFiles(FileSystem, CopyConfiguration)}.
*/
@Test
public void testGetCopyableFiles()
throws IOException {
Properties props = new Properties();
props.put(TimestampBasedCopyableDataset.COPY_POLICY, TimeBasedCopyPolicyForTest.class.getName());
props.put(TimestampBasedCopyableDataset.DATASET_VERSION_FINDER,
TimestampedDatasetVersionFinderForTest.class.getName());
TimestampBasedCopyableDataset copyabledataset =
new TimestampBasedCopyableDatasetForTest(localFs, props, new Path("/data/tracking/PVE"));
Collection<CopyableFile> copyableFiles = copyabledataset.getCopyableFiles(localFs, null);
/**
* {@link TimestampedDatasetVersionFinderForTest} will return three versions, and each version will contain two files.
* So the total number of copyableFiles should be 6, and all should follow the pattern: dummy\/[\\d]\*\/file[12].
*/
Assert.assertEquals(copyableFiles.size(), 6);
Pattern pattern = Pattern.compile("dummy/[\\d]*/file[12]");
Set<String> resultFilesets = Sets.newHashSet();
for (CopyableFile copyableFile : copyableFiles) {
String copyableFileset = copyableFile.getFileSet();
Assert.assertTrue(pattern.matcher(copyableFileset).matches());
resultFilesets.add(copyableFileset);
}
Assert.assertEquals(resultFilesets.size(), 6);
}
public static class TimestampBasedCopyableDatasetForTest extends TimestampBasedCopyableDataset {
public TimestampBasedCopyableDatasetForTest(FileSystem fs, Properties props, Path datasetRoot)
throws IOException {
super(fs, props, datasetRoot);
}
@Override
protected CopyableFileGenerator getCopyableFileGenetator(FileSystem targetFs, CopyConfiguration configuration,
TimestampedDatasetVersion copyableVersion, ConcurrentLinkedQueue<CopyableFile> copyableFileList) {
return new CopyableFileGeneratorForTest(copyableFileList, copyableVersion);
}
private class CopyableFileGeneratorForTest extends TimestampBasedCopyableDataset.CopyableFileGenerator {
public CopyableFileGeneratorForTest(ConcurrentLinkedQueue<CopyableFile> copyableFileList,
TimestampedDatasetVersion copyableVersion) {
super(null, null, null, null, null, null, null, null, null);
this.copyableFileList = copyableFileList;
this.copyableVersion = copyableVersion;
}
ConcurrentLinkedQueue<CopyableFile> copyableFileList;
TimestampedDatasetVersion copyableVersion;
@Override
public void run() {
CopyableFile mockCopyableFile1 = mock(CopyableFile.class);
String file1 = new Path(copyableVersion.getPaths().iterator().next(), "file1").toString();
when(mockCopyableFile1.getFileSet()).thenReturn(file1);
CopyableFile mockCopyableFile2 = mock(CopyableFile.class);
String file2 = new Path(copyableVersion.getPaths().iterator().next(), "file2").toString();
when(mockCopyableFile2.getFileSet()).thenReturn(file2);
try {
Thread.sleep(new Random().nextInt(3000));
} catch (InterruptedException e) {
e.printStackTrace();
}
copyableFileList.add(mockCopyableFile1);
copyableFileList.add(mockCopyableFile2);
}
}
}
public static class TimeBasedCopyPolicyForTest implements VersionSelectionPolicy<TimestampedDatasetVersion> {
public TimeBasedCopyPolicyForTest(Properties props) {
//do nothing
}
@Override
public Class<? extends FileSystemDatasetVersion> versionClass() {
return null;
}
@Override
public Collection<TimestampedDatasetVersion> listSelectedVersions(List<TimestampedDatasetVersion> allVersions) {
return allVersions;
}
}
public static class TimestampedDatasetVersionFinderForTest implements VersionFinder<TimestampedDatasetVersion> {
DateTime start;
int range;
public TimestampedDatasetVersionFinderForTest(FileSystem fs, Properties props) {
start = new DateTime(2006, 1, 1, 0, 0);
range = 3650;
}
@Override
public Class<? extends DatasetVersion> versionClass() {
return null;
}
@Override
public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset)
throws IOException {
Random ran = new Random();
Path dummyPath = new Path("dummy");
DateTime dt1 = start.plusDays(ran.nextInt(range));
Path path1 = new Path(dummyPath, Long.toString(dt1.getMillis()));
TimestampedDatasetVersion version1 = new TimestampedDatasetVersion(dt1, path1);
DateTime dt2 = dt1.plusDays(ran.nextInt(range));
Path path2 = new Path(dummyPath, Long.toString(dt2.getMillis()));
TimestampedDatasetVersion version2 = new TimestampedDatasetVersion(dt2, path2);
DateTime dt3 = dt2.plusDays(ran.nextInt(range));
Path path3 = new Path(dummyPath, Long.toString(dt3.getMillis()));
TimestampedDatasetVersion version3 = new TimestampedDatasetVersion(dt3, path3);
return Lists.newArrayList(version1, version2, version3);
}
}
/**
 * A {@link VersionFinder} for tests that never finds any dataset versions.
 */
public static class VersionFinderDoNothingForTest implements VersionFinder<TimestampedDatasetVersion> {
  public VersionFinderDoNothingForTest(FileSystem fs, Properties props) {
    // Intentionally empty: this finder holds no state.
  }

  @Override
  public Collection<TimestampedDatasetVersion> findDatasetVersions(Dataset dataset)
      throws IOException {
    // Always report an empty version list.
    return Lists.newArrayList();
  }

  @Override
  public Class<? extends DatasetVersion> versionClass() {
    return null; // not consulted by these tests
  }
}
}
| 2,341 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/ReadyCopyableFileFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.util.PathUtils;
@Test(groups = {"gobblin.data.management.copy"})
public class ReadyCopyableFileFilterTest {

  /**
   * Verifies that {@link ReadyCopyableFileFilter} keeps only the files whose
   * corresponding ".ready" marker file exists on the source file system.
   */
  @Test
  public void testFilter() throws Exception {
    CopyableFileFilter readyFilter = new ReadyCopyableFileFilter();

    List<CopyableFile> candidates = Lists.newArrayList(
        CopyableFileUtils.getTestCopyableFile(),
        CopyableFileUtils.getTestCopyableFile(),
        CopyableFileUtils.getTestCopyableFile());

    // Only the second file has its ".ready" marker present on the source FS.
    boolean[] readyMarkers = {false, true, false};
    FileSystem sourceFs = Mockito.mock(FileSystem.class);
    for (int i = 0; i < candidates.size(); i++) {
      Path readyPath = PathUtils.addExtension(candidates.get(i).getOrigin().getPath(), ".ready");
      Mockito.when(sourceFs.exists(readyPath)).thenReturn(readyMarkers[i]);
    }

    Collection<CopyableFile> filtered = readyFilter.filter(sourceFs, null, candidates);

    // Exactly one file (the one with a marker) survives the filter.
    Assert.assertEquals(filtered.size(), 1);
  }
}
| 2,342 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/CopyableFileUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Maps;
/**
* Utils class to generate dummy {@link CopyableFile}s for testing. Random strings are generated for null paths.
*/
public class CopyableFileUtils {
public static CopyableFile createTestCopyableFile(String resourcePath) throws IOException {
FileSystem fs = FileSystem.getLocal(new Configuration());
fs.create(new Path(resourcePath));
FileStatus status = new FileStatus(0l, false, 0, 0l, 0l, new Path(resourcePath));
return new CopyableFile(status, new Path(getRandomPath()), null, null, null,
PreserveAttributes.fromMnemonicString(""), "", 0 ,0, Maps.<String, String>newHashMap(), "", null);
}
public static CopyableFile getTestCopyableFile() {
return getTestCopyableFile(0L, null);
}
public static CopyableFile getTestCopyableFile(String resourcePath) {
return getTestCopyableFile(resourcePath, null);
}
public static CopyableFile getTestCopyableFile(Long size, OwnerAndPermission ownerAndPermission) {
return getTestCopyableFile(null, null, size, ownerAndPermission);
}
public static CopyableFile getTestCopyableFile(OwnerAndPermission ownerAndPermission) {
return getTestCopyableFile(null, null, 0L, ownerAndPermission);
}
public static CopyableFile getTestCopyableFile(String resourcePath, OwnerAndPermission ownerAndPermission) {
return getTestCopyableFile(resourcePath, null, 0L, ownerAndPermission);
}
public static CopyableFile getTestCopyableFile(String resourcePath, String relativePath, Long size,
OwnerAndPermission ownerAndPermission) {
return getTestCopyableFile(resourcePath, getRandomPath(), relativePath, size, ownerAndPermission);
}
public static CopyableFile getTestCopyableFile(String resourcePath, String destinationPath, String relativePath,
Long size, OwnerAndPermission ownerAndPermission) {
FileStatus status = null;
if (resourcePath == null) {
resourcePath = getRandomPath();
status = new FileStatus(size, false, 0, 0l, 0l, new Path(resourcePath));
} else {
String filePath = CopyableFileUtils.class.getClassLoader().getResource(resourcePath).getFile();
status = new FileStatus(size, false, 0, 0l, 0l, new Path(filePath));
}
if (relativePath == null) {
relativePath = getRandomPath();
}
Path destinationRelativePath = new Path(relativePath);
return new CopyableFile(status, new Path(destinationPath), ownerAndPermission, null, null,
PreserveAttributes.fromMnemonicString(""), "", 0, 0, Maps.<String, String>newHashMap(), "", null);
}
private static String getRandomPath() {
return new Path(RandomStringUtils.randomAlphabetic(6), RandomStringUtils.randomAlphabetic(6)).toString();
}
}
| 2,343 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/MockHiveDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Lists;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.HiveTargetPathHelper;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
 * A {@link HiveDatasetFinder} for tests that fabricates three tables ("table0".."table2") in
 * database "testDB". For each table it materializes a backing data directory under
 * {@code tempDirRoot}/testPath/testDB and configures dataset-local work dirs plus a
 * testPath -&gt; targetPath prefix replacement used when computing copy target paths.
 */
public class MockHiveDatasetFinder extends HiveDatasetFinder {
// Root directory under which per-table data locations are created.
Path tempDirRoot;
// NOTE(review): this field is never assigned or read within this class — confirm it is needed.
SourceState state;
public MockHiveDatasetFinder(LocalFileSystem fs, Properties properties,
EventSubmitter eventSubmitter) throws IOException {
super(fs, properties, eventSubmitter);
// "tempDirRoot" must be present in the supplied properties; a missing key would NPE here.
this.tempDirRoot = new Path(properties.getProperty("tempDirRoot"));
}
/** Returns the fixed set of mock tables: testDB.table0, testDB.table1, testDB.table2. */
public Collection<DbAndTable> getTables() throws IOException {
List<DbAndTable> tables = Lists.newArrayList();
for (int i = 0; i < 3; i++) {
tables.add(new DbAndTable("testDB", "table" + i));
}
return tables;
}
/**
 * Lazily builds one {@link HiveDataset} per mock table. Iteration has a side effect:
 * it creates the table's data directory on the local file system.
 */
@Override
public Iterator<HiveDataset> getDatasetsIterator() throws IOException {
return new AbstractIterator<HiveDataset>() {
private Iterator<DbAndTable> tables = getTables().iterator();
@Override
protected HiveDataset computeNext() {
try {
while (this.tables.hasNext()) {
DbAndTable dbAndTable = this.tables.next();
// Materialize the table's backing directory before handing out the dataset.
File dbPath = new File(tempDirRoot + "/testPath/testDB/" + dbAndTable.getTable());
fs.mkdirs(new Path(dbPath.getAbsolutePath()));
Properties hiveProperties = new Properties();
hiveProperties.setProperty(ConfigurationKeys.USE_DATASET_LOCAL_WORK_DIR, "true");
// Rewrite .../testPath/... locations to .../targetPath/... when computing copy targets.
hiveProperties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED, tempDirRoot + "/testPath");
hiveProperties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_REPLACEMENT, tempDirRoot + "/targetPath");
Table table = new Table(Table.getEmptyTable(dbAndTable.getDb(), dbAndTable.getTable()));
table.setDataLocation(new Path(dbPath.getPath()));
HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
HiveDataset dataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
return dataset;
}
} catch (IOException e) {
// computeNext() cannot declare checked exceptions; re-throw as unchecked.
Throwables.propagate(e);
}
return endOfData();
}
};
}
}
| 2,344 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/CopySourcePrioritizationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.data.management.copy.prioritization.FileSetComparator;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.JobLauncherUtils;
/**
 * Tests for {@link CopySource} work-unit generation with and without file-set prioritization.
 *
 * <p>The mock universe: {@link MyFinder} yields {@code DATASETS}=2 datasets (d0, d1); each
 * {@link MyDataset} yields {@code FILE_SETS}=4 file sets (fs0..fs3); each {@link MyFileSet}
 * yields {@code FILES}=2 files (f1, f2) — 16 copyable files in total, with origin paths of
 * the form "d&lt;i&gt;.fs&lt;j&gt;.f&lt;k&gt;".
 */
public class CopySourcePrioritizationTest {
/** With no limits or prioritizer configured, every file becomes a work unit. */
@Test
public void testNoPrioritization() throws Exception {
SourceState state = new SourceState();
state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
// CopySource instantiates the finder from this class name via reflection.
state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
MyFinder.class.getName());
CopySource source = new CopySource();
List<WorkUnit> workunits = source.getWorkunits(state);
workunits = JobLauncherUtils.flattenWorkUnits(workunits);
// All 2 * 4 * 2 = 16 files are selected.
Assert.assertEquals(workunits.size(), MyFinder.DATASETS * MyDataset.FILE_SETS * MyFileSet.FILES);
}
/** With an entity limit but no prioritizer, work units are taken in listing order. */
@Test
public void testUnprioritizedFileLimit() throws Exception {
SourceState state = new SourceState();
state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
MyFinder.class.getName());
// Disable parallel listing to make work unit selection deterministic
state.setProp(CopySource.MAX_CONCURRENT_LISTING_SERVICES, 1);
state.setProp(CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.ENTITIES_KEY, 10);
state.setProp(CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.TOLERANCE_KEY, 1);
CopySource source = new CopySource();
List<WorkUnit> workunits = source.getWorkunits(state);
workunits = JobLauncherUtils.flattenWorkUnits(workunits);
// Check limited to 10 entities
Assert.assertEquals(workunits.size(), 10);
// In listing order, the first ten files are all of d0 plus d1's first file set.
List<String> paths = extractPaths(workunits);
Assert.assertTrue(paths.contains("d0.fs0.f1"));
Assert.assertTrue(paths.contains("d0.fs0.f2"));
Assert.assertTrue(paths.contains("d0.fs1.f1"));
Assert.assertTrue(paths.contains("d0.fs1.f2"));
Assert.assertTrue(paths.contains("d0.fs2.f1"));
Assert.assertTrue(paths.contains("d0.fs2.f2"));
Assert.assertTrue(paths.contains("d0.fs3.f1"));
Assert.assertTrue(paths.contains("d0.fs3.f2"));
Assert.assertTrue(paths.contains("d1.fs0.f1"));
Assert.assertTrue(paths.contains("d1.fs0.f2"));
}
// This test uses a prioritizer that preferentially copies the lower file sets of each dataset
@Test
public void testPrioritization() throws Exception {
SourceState state = new SourceState();
state.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, "file:///");
state.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, "file:///");
state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target/dir");
state.setProp(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
MyFinder.class.getName());
// MyPrioritizer orders file sets by their index within the dataset (lower index first).
state.setProp(CopyConfiguration.PRIORITIZER_ALIAS_KEY, MyPrioritizer.class.getName());
state.setProp(CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.ENTITIES_KEY, 8);
state.setProp(CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.TOLERANCE_KEY, 1);
CopySource source = new CopySource();
List<WorkUnit> workunits = source.getWorkunits(state);
workunits = JobLauncherUtils.flattenWorkUnits(workunits);
Assert.assertEquals(workunits.size(), 8);
// The eight selected files are fs0 and fs1 of each dataset, not d0's fs2/fs3.
List<String> paths = extractPaths(workunits);
Assert.assertTrue(paths.contains("d0.fs0.f1"));
Assert.assertTrue(paths.contains("d0.fs0.f2"));
Assert.assertTrue(paths.contains("d0.fs1.f1"));
Assert.assertTrue(paths.contains("d0.fs1.f2"));
Assert.assertTrue(paths.contains("d1.fs0.f1"));
Assert.assertTrue(paths.contains("d1.fs0.f2"));
Assert.assertTrue(paths.contains("d1.fs1.f1"));
Assert.assertTrue(paths.contains("d1.fs1.f2"));
}
/** Deserializes each work unit's copy entity and collects the origin paths. */
private List<String> extractPaths(List<WorkUnit> workUnits) {
List<String> paths = Lists.newArrayList();
for (WorkUnit wu : workUnits) {
CopyableFile cf = (CopyableFile) CopySource.deserializeCopyEntity(wu);
paths.add(cf.getOrigin().getPath().toString());
}
return paths;
}
/** Mock finder producing {@code DATASETS} datasets named d0, d1, ... (iterator only). */
public static class MyFinder implements IterableDatasetFinder<IterableCopyableDataset> {
public static final int DATASETS = 2;
@Override
public List<IterableCopyableDataset> findDatasets()
throws IOException {
// Not used by CopySource, which consumes getDatasetsIterator() instead.
return null;
}
@Override
public Path commonDatasetRoot() {
return new Path("/");
}
@Override
public Iterator<IterableCopyableDataset> getDatasetsIterator()
throws IOException {
List<IterableCopyableDataset> datasets = Lists.newArrayList();
for (int i = 0; i < DATASETS; i++) {
datasets.add(new MyDataset("d" + i));
}
return datasets.iterator();
}
}
/** Mock dataset producing {@code FILE_SETS} file sets named &lt;name&gt;.fs0 .. &lt;name&gt;.fs3. */
@AllArgsConstructor
public static class MyDataset implements IterableCopyableDataset {
public static final int FILE_SETS = 4;
private final String name;
@Override
public String datasetURN() {
return this.name;
}
@Override
public Iterator<FileSet<CopyEntity>> getFileSetIterator(FileSystem targetFs, CopyConfiguration configuration)
throws IOException {
List<FileSet<CopyEntity>> fileSets = Lists.newArrayList();
for (int i = 0; i < FILE_SETS; i++) {
fileSets.add(new MyFileSet(this.name + ".fs" + Integer.toString(i), this, i));
}
return fileSets.iterator();
}
}
/** Mock file set producing {@code FILES} copyable files named &lt;fileset&gt;.f1 and .f2. */
public static class MyFileSet extends FileSet<CopyEntity> {
public static final int FILES = 2;
// Index of this file set within its dataset; MyPrioritizer sorts on it.
@Getter
private final int filesetNumberInDataset;
public MyFileSet(String name, Dataset dataset, int filesetNumberInDataset) {
super(name, dataset);
this.filesetNumberInDataset = filesetNumberInDataset;
}
@Override
protected Collection<CopyEntity> generateCopyEntities()
throws IOException {
CopyableFile cf1 = createCopyableFile(getName() + ".f1", Integer.toString(this.filesetNumberInDataset));
CopyableFile cf2 = createCopyableFile(getName() + ".f2", Integer.toString(this.filesetNumberInDataset));
return Lists.<CopyEntity>newArrayList(cf1, cf2);
}
}
/** Builds a minimal {@link CopyableFile} whose origin and destination are both {@code path}. */
private static CopyableFile createCopyableFile(String path, String fileSet) {
return new CopyableFile(new FileStatus(0, false, 0, 0, 0, new Path(path)), new Path(path),
new OwnerAndPermission("owner", "group", FsPermission.getDefault()), null, null,
PreserveAttributes.fromMnemonicString(""), fileSet, 0, 0, Maps.<String, String>newHashMap(), "", null);
}
/** Orders file sets by their index within the dataset, lowest first. */
public static class MyPrioritizer implements FileSetComparator {
@Override
public int compare(FileSet<CopyEntity> o1, FileSet<CopyEntity> o2) {
return Integer.compare(((MyFileSet) o1).getFilesetNumberInDataset(), ((MyFileSet) o2) .getFilesetNumberInDataset());
}
}
}
| 2,345 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/RecursiveCopyableDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.entities.CommitStepCopyEntity;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import javax.annotation.Nullable;
import lombok.Data;
public class RecursiveCopyableDatasetTest {
/**
 * Two files exist at the source and neither exists at the target: both must be scheduled
 * for copy and nothing scheduled for deletion (delete flag is off).
 */
@Test
public void testSimpleCopy() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");

  List<FileStatus> filesAtSource =
      Lists.newArrayList(createFileStatus(source, "file1"), createFileStatus(source, "file2"));
  List<FileStatus> filesAtTarget = Lists.newArrayList(createFileStatus(target, "file3"));

  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());

  RecursiveCopyableDataset dataset =
      new TestRecursiveCopyableDataset(source, target, filesAtSource, filesAtTarget, props);
  Collection<? extends CopyEntity> copyEntities = dataset.getCopyableFiles(
      FileSystem.getLocal(new Configuration()),
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), props).build());

  Assert.assertEquals(copyEntities.size(), 2);

  ClassifiedFiles classified = classifyFiles(copyEntities);
  // Each source file maps to the same-named path under the target root.
  Assert.assertTrue(classified.getPathsToCopy().containsKey(new Path(source, "file1")));
  Assert.assertEquals(classified.getPathsToCopy().get(new Path(source, "file1")), new Path(target, "file1"));
  Assert.assertTrue(classified.getPathsToCopy().containsKey(new Path(source, "file2")));
  Assert.assertEquals(classified.getPathsToCopy().get(new Path(source, "file2")), new Path(target, "file2"));
  // Without the delete flag, the stray target file is left alone.
  Assert.assertEquals(classified.getPathsToDelete().size(), 0);
}
/**
 * A file present at both source and target with identical length and modtime is a
 * non-conflicting collision: it is skipped, and only the remaining source file is copied.
 */
@Test
public void testCopyWithNonConflictingCollision() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");

  // "file1" is identical (length 1) on both sides; "file2" exists only at the source.
  List<FileStatus> filesAtSource =
      Lists.newArrayList(createFileStatus(source, "file1", 1), createFileStatus(source, "file2"));
  List<FileStatus> filesAtTarget = Lists.newArrayList(createFileStatus(target, "file1", 1));

  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());

  RecursiveCopyableDataset dataset =
      new TestRecursiveCopyableDataset(source, target, filesAtSource, filesAtTarget, props);
  Collection<? extends CopyEntity> copyEntities = dataset.getCopyableFiles(
      FileSystem.getLocal(new Configuration()),
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), props).build());

  // Only "file2" needs copying.
  Assert.assertEquals(copyEntities.size(), 1);

  ClassifiedFiles classified = classifyFiles(copyEntities);
  Assert.assertTrue(classified.getPathsToCopy().containsKey(new Path(source, "file2")));
  Assert.assertEquals(classified.getPathsToCopy().get(new Path(source, "file2")), new Path(target, "file2"));
  Assert.assertEquals(classified.getPathsToDelete().size(), 0);
}
/**
 * A file that exists at both source and target with different lengths is a conflicting
 * collision: dataset listing must fail rather than silently skip or overwrite.
 */
@Test
public void testCopyWithConflictingCollisionDueToSize() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");
  // Same name "file1" on both sides, but lengths differ (1 vs 2).
  List<FileStatus> sourceFiles = Lists.newArrayList(createFileStatus(source, "file1", 1), createFileStatus(source, "file2"));
  List<FileStatus> targetFiles = Lists.newArrayList(createFileStatus(target, "file1", 2));
  Properties properties = new Properties();
  properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
  RecursiveCopyableDataset dataset = new TestRecursiveCopyableDataset(source, target, sourceFiles, targetFiles, properties);
  try {
    // Return value intentionally ignored: the call itself is expected to throw.
    dataset.getCopyableFiles(FileSystem.getLocal(new Configuration()),
        CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).build());
    Assert.fail("Expected IOException due to conflicting size collision on file1");
  } catch (IOException ioe) {
    // expected: collision with differing sizes
  }
}
/**
 * A file that exists at both source and target with equal lengths but a newer source
 * modification time is a conflicting collision: dataset listing must fail.
 */
@Test
public void testCopyWithConflictingCollisionDueToModtime() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");
  // Same name and length, but the source copy (modtime 10) is newer than the target's (9).
  List<FileStatus> sourceFiles = Lists.newArrayList(createFileStatus(source, "file1", 1, 10), createFileStatus(source, "file2"));
  List<FileStatus> targetFiles = Lists.newArrayList(createFileStatus(target, "file1", 1, 9));
  Properties properties = new Properties();
  properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
  RecursiveCopyableDataset dataset = new TestRecursiveCopyableDataset(source, target, sourceFiles, targetFiles, properties);
  try {
    // Return value intentionally ignored: the call itself is expected to throw.
    dataset.getCopyableFiles(FileSystem.getLocal(new Configuration()),
        CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).build());
    Assert.fail("Expected IOException due to conflicting modtime collision on file1");
  } catch (IOException ioe) {
    // expected: collision with differing modification times
  }
}
/**
 * With {@code UPDATE_KEY} set, a conflicting collision ("file1": source length 1 vs target
 * length 2) is resolved by re-copying the file and scheduling the stale target copy for
 * deletion, instead of failing.
 */
@Test
public void testCopyWithUpdate() throws Exception {
Path source = new Path("/source");
Path target = new Path("/target");
// The target already holds a "file1" of a different length (2) than the source's (1).
FileStatus targetFile1 = createFileStatus(target, "file1", 2);
List<FileStatus> sourceFiles = Lists.newArrayList(createFileStatus(source, "file1", 1), createFileStatus(source, "file2"));
List<FileStatus> targetFiles = Lists.newArrayList(targetFile1);
Properties properties = new Properties();
properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
properties.setProperty(RecursiveCopyableDataset.UPDATE_KEY, "true");
RecursiveCopyableDataset dataset = new TestRecursiveCopyableDataset(source, target, sourceFiles, targetFiles, properties);
// Spy the target FS so getFileStatus on the colliding path returns our fabricated status.
FileSystem targetFsUnderlying = FileSystem.getLocal(new Configuration());
FileSystem targetFs = Mockito.spy(targetFsUnderlying);
Mockito.doReturn(targetFile1).when(targetFs).getFileStatus(new Path(target, "file1"));
Collection<? extends CopyEntity> copyableFiles = dataset.getCopyableFiles(targetFs,
CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).build());
// Two copies (file1 re-copy + file2) plus one delete commit step.
Assert.assertEquals(copyableFiles.size(), 3);
ClassifiedFiles classifiedFiles = classifyFiles(copyableFiles);
Assert.assertTrue(classifiedFiles.getPathsToCopy().containsKey(new Path(source, "file1")));
Assert.assertEquals(classifiedFiles.getPathsToCopy().get(new Path(source, "file1")), new Path(target, "file1"));
Assert.assertTrue(classifiedFiles.getPathsToCopy().containsKey(new Path(source, "file2")));
Assert.assertEquals(classifiedFiles.getPathsToCopy().get(new Path(source, "file2")), new Path(target, "file2"));
// The stale target copy of file1 is scheduled for deletion before the new copy lands.
Assert.assertEquals(classifiedFiles.getPathsToDelete().size(), 1);
Assert.assertTrue(classifiedFiles.getPathsToDelete().contains(new Path(target, "file1")));
}
/**
 * With the delete flag set, a target-only file produces a delete commit step (without
 * empty-parent cleanup) in addition to the regular copy entity.
 */
@Test
public void testCopyWithDeleteTarget() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");
  List<FileStatus> filesAtSource = Lists.newArrayList(createFileStatus(source, "file1"));
  List<FileStatus> filesAtTarget = Lists.newArrayList(createFileStatus(target, "file3"));

  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
  props.setProperty(RecursiveCopyableDataset.DELETE_KEY, "true");

  RecursiveCopyableDataset dataset =
      new TestRecursiveCopyableDataset(source, target, filesAtSource, filesAtTarget, props);
  Collection<? extends CopyEntity> copyEntities = dataset.getCopyableFiles(
      FileSystem.getLocal(new Configuration()),
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), props).build());

  // One copy entity plus one delete commit step.
  Assert.assertEquals(copyEntities.size(), 2);

  ClassifiedFiles classified = classifyFiles(copyEntities);
  Assert.assertTrue(classified.getPathsToCopy().containsKey(new Path(source, "file1")));
  Assert.assertEquals(classified.getPathsToCopy().get(new Path(source, "file1")), new Path(target, "file1"));
  Assert.assertEquals(classified.getPathsToDelete().size(), 1);
  Assert.assertTrue(classified.getPathsToDelete().contains(new Path(target, "file3")));

  // Locate the commit-step entity with a plain loop instead of Guava's Iterables.filter.
  CommitStepCopyEntity stepEntity = null;
  for (CopyEntity entity : copyEntities) {
    if (entity instanceof CommitStepCopyEntity) {
      stepEntity = (CommitStepCopyEntity) entity;
      break;
    }
  }
  Assert.assertNotNull(stepEntity);
  DeleteFileCommitStep step = (DeleteFileCommitStep) stepEntity.getStep();
  // No parent deletion limit configured, so empty parent directories are not cleaned up.
  Assert.assertFalse(step.getParentDeletionLimit().isPresent());
}
/**
 * With both the delete flag and empty-directory cleanup enabled, the delete commit step
 * carries a parent-deletion limit equal to the target root.
 */
@Test
public void testCopyWithDeleteTargetAndDeleteParentDirectories() throws Exception {
  Path source = new Path("/source");
  Path target = new Path("/target");
  List<FileStatus> filesAtSource = Lists.newArrayList(createFileStatus(source, "file1"));
  List<FileStatus> filesAtTarget = Lists.newArrayList(createFileStatus(target, "file3"));

  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
  props.setProperty(RecursiveCopyableDataset.DELETE_KEY, "true");
  props.setProperty(RecursiveCopyableDataset.DELETE_EMPTY_DIRECTORIES_KEY, "true");

  RecursiveCopyableDataset dataset =
      new TestRecursiveCopyableDataset(source, target, filesAtSource, filesAtTarget, props);
  Collection<? extends CopyEntity> copyEntities = dataset.getCopyableFiles(
      FileSystem.getLocal(new Configuration()),
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), props).build());

  // One copy entity plus one delete commit step.
  Assert.assertEquals(copyEntities.size(), 2);

  ClassifiedFiles classified = classifyFiles(copyEntities);
  Assert.assertTrue(classified.getPathsToCopy().containsKey(new Path(source, "file1")));
  Assert.assertEquals(classified.getPathsToCopy().get(new Path(source, "file1")), new Path(target, "file1"));
  Assert.assertEquals(classified.getPathsToDelete().size(), 1);
  Assert.assertTrue(classified.getPathsToDelete().contains(new Path(target, "file3")));

  // Locate the commit-step entity with a plain loop instead of Guava's Iterables.filter.
  CommitStepCopyEntity stepEntity = null;
  for (CopyEntity entity : copyEntities) {
    if (entity instanceof CommitStepCopyEntity) {
      stepEntity = (CommitStepCopyEntity) entity;
      break;
    }
  }
  Assert.assertNotNull(stepEntity);
  DeleteFileCommitStep step = (DeleteFileCommitStep) stepEntity.getStep();
  // Empty parent directories may be cleaned up, bounded by the target root.
  Assert.assertTrue(step.getParentDeletionLimit().isPresent());
  Assert.assertEquals(step.getParentDeletionLimit().get(), target);
}
/**
 * When the dataset root is matched via a glob ("/source/*"), the matched directory name must
 * be appended under the publish dir, so "/source/directory/file1" lands at
 * "/target/directory/file1".
 */
@Test
public void testCorrectComputationOfTargetPathsWhenUsingGlob() throws Exception {
Path source = new Path("/source/directory");
Path target = new Path("/target");
List<FileStatus> sourceFiles = Lists.newArrayList(createFileStatus(source, "file1"));
List<FileStatus> targetFiles = Lists.newArrayList();
Properties properties = new Properties();
properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, target.toString());
// Spy the source FS so the globbed root resolves to a directory status.
FileSystem sourceUnderlyingFS = FileSystem.getLocal(new Configuration());
FileSystem sourceFs = Mockito.spy(sourceUnderlyingFS);
Mockito.doReturn(new FileStatus(0, true, 0, 0, 0, source)).when(sourceFs).getFileStatus(source);
RecursiveCopyableDataset dataset =
new TestRecursiveCopyableDataset(source, new Path(target, "directory"), sourceFiles, targetFiles, properties,
new Path("/source/*"), sourceFs);
// NOTE(review): other tests pass FileSystem.getLocal(...) as the target FS here;
// FileSystem.get(...) relies on the default fs being local — confirm this is intentional.
Collection<? extends CopyEntity> copyableFiles = dataset.getCopyableFiles(FileSystem.get(new Configuration()),
CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).build());
Assert.assertEquals(copyableFiles.size(), 1);
ClassifiedFiles classifiedFiles = classifyFiles(copyableFiles);
// The destination keeps the globbed directory component under the target root.
Assert.assertTrue(classifiedFiles.getPathsToCopy().containsKey(new Path(source, "file1")));
Assert.assertEquals(classifiedFiles.getPathsToCopy().get(new Path(source, "file1")), new Path(target, "directory/file1"));
}
/**
 * Splits the given copy entities into (1) a map from each origin path to its destination
 * path, taken from the {@code CopyableFile}s, and (2) the set of paths scheduled for
 * deletion, taken from any {@code DeleteFileCommitStep} wrapped in a
 * {@code CommitStepCopyEntity}.
 */
private ClassifiedFiles classifyFiles(Collection<? extends CopyEntity> copyEntities) {
  Map<Path, Path> originToDestination = Maps.newHashMap();
  Set<Path> deletions = Sets.newHashSet();
  for (CopyEntity entity : copyEntities) {
    if (entity instanceof CopyableFile) {
      CopyableFile copyableFile = (CopyableFile) entity;
      originToDestination.put(copyableFile.getOrigin().getPath(), copyableFile.getDestination());
    }
    if (entity instanceof CommitStepCopyEntity) {
      CommitStep step = ((CommitStepCopyEntity) entity).getStep();
      if (step instanceof DeleteFileCommitStep) {
        for (FileStatus toDelete : ((DeleteFileCommitStep) step).getPathsToDelete()) {
          deletions.add(toDelete.getPath());
        }
      }
    }
  }
  return new ClassifiedFiles(originToDestination, deletions);
}
// Immutable holder pairing the origin->destination copy mapping with the set of paths
// slated for deletion; lombok @Data generates the getters, equals/hashCode and toString.
@Data
private class ClassifiedFiles {
  private final Map<Path, Path> pathsToCopy;
  private final Set<Path> pathsToDelete;
}
// Convenience overload: zero-length file with modification time 0.
private FileStatus createFileStatus(Path root, String relative) {
  return createFileStatus(root, relative, 0, 0);
}
// Convenience overload: file of the given length with modification time 0.
private FileStatus createFileStatus(Path root, String relative, long length) {
  return createFileStatus(root, relative, length, 0);
}
/** Builds a non-directory {@link FileStatus} for {@code root/relative} with the given length and modtime. */
private FileStatus createFileStatus(Path root, String relative, long length, long modtime) {
  Path fullPath = new Path(root, relative);
  return new FileStatus(length, false, 0, 0, modtime, fullPath);
}
/**
 * A {@link RecursiveCopyableDataset} whose file listings are fully controlled by the test:
 * listing the source path yields {@code sourceFiles}, listing the target path yields
 * {@code targetFiles}, and listing any other path is an error.
 */
private static class TestRecursiveCopyableDataset extends RecursiveCopyableDataset {

  private final Path source;
  private final Path target;
  private final List<FileStatus> sourceFiles;
  private final List<FileStatus> targetFiles;

  public TestRecursiveCopyableDataset(Path source,
      Path target, List<FileStatus> sourceFiles, List<FileStatus> targetFiles, Properties properties) throws IOException {
    // Default configuration: the glob is the source path itself, on the local file system.
    this(source, target, sourceFiles, targetFiles, properties, source, FileSystem.getLocal(new Configuration()));
  }

  public TestRecursiveCopyableDataset(Path source, Path target, List<FileStatus> sourceFiles, List<FileStatus> targetFiles,
      Properties properties, Path glob, FileSystem sourceFs) throws IOException {
    super(sourceFs, source, properties, glob);
    this.source = source;
    this.target = target;
    this.sourceFiles = sourceFiles;
    this.targetFiles = targetFiles;
  }

  @Override
  protected List<FileStatus> getFilesAtPath(FileSystem fs, Path path, PathFilter fileFilter)
      throws IOException {
    // Canned listings keyed on the exact path; fileFilter is intentionally ignored.
    if (path.equals(this.source)) {
      return this.sourceFiles;
    }
    if (path.equals(this.target)) {
      return this.targetFiles;
    }
    throw new RuntimeException("Not a recognized path. " + path);
  }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy;
import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.dataset.DummyDataset;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.source.workunit.WorkUnit;
public class ConcurrentBoundedWorkUnitListTest {

  private static final String ORIGIN_PATH = "/path/origin";
  private static final String TARGET_PATH = "/path/target";

  /** With a strict bound (multiplier 1), additions that would exceed 10 work units are rejected. */
  @Test
  public void testBoundedAdd() throws IOException {
    ConcurrentBoundedWorkUnitList list = new ConcurrentBoundedWorkUnitList(10,
        new AllEqualComparator<FileSet<CopyEntity>>(), 1);
    Assert.assertTrue(addFiles(list, "fs", 6));
    Assert.assertFalse(list.hasRejectedFileSet());
    // 6 + 5 > 10: rejected, and the rejection is recorded.
    Assert.assertFalse(addFiles(list, "fs", 5));
    Assert.assertTrue(list.hasRejectedFileSet());
    // 6 + 4 == 10: exactly at the bound, accepted; anything further is rejected.
    Assert.assertTrue(addFiles(list, "fs", 4));
    Assert.assertFalse(addFiles(list, "fs", 1));
  }

  /** With multiplier 2.0, the list keeps accepting until 2x the nominal bound, then rejects. */
  @Test
  public void testNonStrictBoundAdd() throws IOException {
    ConcurrentBoundedWorkUnitList list = new ConcurrentBoundedWorkUnitList(10,
        new AllEqualComparator<FileSet<CopyEntity>>(), 2.0);
    Assert.assertTrue(addFiles(list, "fs", 6));
    Assert.assertFalse(list.hasRejectedFileSet());
    Assert.assertFalse(list.isFull());
    // 11 units exceeds the nominal bound of 10 but not the hard bound of 20.
    Assert.assertTrue(addFiles(list, "fs", 5));
    Assert.assertFalse(list.hasRejectedFileSet());
    Assert.assertTrue(list.isFull());
    Assert.assertTrue(addFiles(list, "fs", 4));
    Assert.assertFalse(list.hasRejectedFileSet());
    // 15 + 6 > 20: past the hard bound, rejected.
    Assert.assertFalse(addFiles(list, "fs", 6));
    Assert.assertTrue(list.hasRejectedFileSet());
    Assert.assertTrue(list.isFull());
  }

  /** Higher-priority file sets evict lower-priority ones once the list is full. */
  @Test
  public void testPriority() throws IOException {
    ConcurrentBoundedWorkUnitList list = new ConcurrentBoundedWorkUnitList(10,
        new NameComparator(), 1);
    // Fill container
    Assert.assertTrue(addFiles(list, "z-1", 10));
    Assert.assertEquals(list.getWorkUnits().size(), 10);
    // Reject because same priority
    Assert.assertFalse(addFiles(list, "z-2", 5));
    // Higher priority, so accept new work units
    Assert.assertTrue(addFiles(list, "y-1", 5));
    Assert.assertEquals(list.getWorkUnits().size(), 5);
    // Lower priority fits, so accept new work units
    Assert.assertTrue(addFiles(list, "z-3", 2));
    Assert.assertEquals(list.getWorkUnits().size(), 7);
    // Lower priority fits, so accept new work units
    Assert.assertTrue(addFiles(list, "z-4", 2));
    Assert.assertEquals(list.getWorkUnits().size(), 9);
    // Higher priority, evict lowest priority
    Assert.assertTrue(addFiles(list, "y-2", 4));
    Assert.assertEquals(list.getWorkUnits().size(), 9);
    // Highest priority, evict lowest priority
    Assert.assertTrue(addFiles(list, "x-1", 4));
    Assert.assertEquals(list.getWorkUnits().size(), 9);
  }

  /**
   * Compares names of partitions, but only the part of the name before the first "-" character.
   * For example, "a-foo" = "a-bar", and "a-foo" < "b-bar".
   */
  private class NameComparator implements Comparator<FileSet<CopyEntity>> {
    @Override public int compare(FileSet<CopyEntity> o1, FileSet<CopyEntity> o2) {
      // limit(2) splits into at most two pieces, so the first piece is everything before the
      // first "-". The previous limit(1) returned the entire name as a single piece, which
      // compared full names ("a-foo" != "a-bar"), contradicting the documented semantics.
      String o1Token = Splitter.on("-").limit(2).split(o1.getName()).iterator().next();
      String o2Token = Splitter.on("-").limit(2).split(o2.getName()).iterator().next();
      return o1Token.compareTo(o2Token);
    }
  }

  /**
   * Builds a {@link FileSet} named {@code fileSetName} containing {@code fileNumber} copyable
   * files (and matching serialized work units) and offers it to {@code list}.
   *
   * @return whether the list accepted the file set
   */
  public boolean addFiles(ConcurrentBoundedWorkUnitList list, String fileSetName, int fileNumber) throws IOException {
    FileSet.Builder<CopyEntity> partitionBuilder =
        new FileSet.Builder<>(fileSetName, new DummyDataset(new Path("/path")));
    List<WorkUnit> workUnits = Lists.newArrayList();
    for (int i = 0; i < fileNumber; i++) {
      CopyEntity cf = createCopyableFile(i);
      partitionBuilder.add(cf);
      WorkUnit workUnit = new WorkUnit();
      CopySource.serializeCopyEntity(workUnit, cf);
      workUnits.add(workUnit);
    }
    return list.addFileSet(partitionBuilder.build(), workUnits);
  }

  /** Creates a minimal {@link CopyableFile} from ORIGIN_PATH/{n}.file to TARGET_PATH/{n}.file. */
  public CopyEntity createCopyableFile(int fileNumber) throws IOException {
    Path originPath = new Path(ORIGIN_PATH, fileNumber + ".file");
    FileStatus origin = new FileStatus(0, false, 0, 0, 0, originPath);
    Path targetPath = new Path(TARGET_PATH, fileNumber + ".file");
    return new CopyableFile(origin, targetPath, new OwnerAndPermission(null, null, null),
        Lists.<OwnerAndPermission>newArrayList(), null, PreserveAttributes.fromMnemonicString(""), "", 0, 0, Maps
        .<String, String>newHashMap(), "", null);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyContext;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HivePartitionFileSet;
public class RegistrationTimeSkipPredicateTest {

  @Test
  public void test() throws Exception {
    Path partitionPath = new Path("/path/to/partition1");
    long modTime = 100000;

    // Mock configuration hands out a real CopyContext.
    CopyContext copyContext = new CopyContext();
    CopyConfiguration configuration = Mockito.mock(CopyConfiguration.class);
    Mockito.doReturn(copyContext).when(configuration).getCopyContext();

    // Spy on the local FS so the partition path reports a fixed modification time,
    // whether looked up by its raw or fully-qualified form.
    HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
    FileSystem spiedFs = Mockito.spy(FileSystem.getLocal(new Configuration()));
    FileStatus partitionStatus = new FileStatus(1, false, 1, 1, modTime, partitionPath);
    Path qualifiedPath = spiedFs.makeQualified(partitionPath);
    Mockito.doReturn(partitionStatus).when(spiedFs).getFileStatus(qualifiedPath);
    Mockito.doReturn(partitionStatus).when(spiedFs).getFileStatus(partitionPath);
    Mockito.doReturn(spiedFs).when(hiveDataset).getFs();

    HiveCopyEntityHelper copyHelper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.doReturn(configuration).when(copyHelper).getConfiguration();
    Mockito.doReturn(hiveDataset).when(copyHelper).getDataset();

    RegistrationTimeSkipPredicate predicate = new RegistrationTimeSkipPredicate(copyHelper);

    // Target partition exists, but registration time precedes the source modtime => don't skip.
    Assert.assertFalse(predicate.apply(createPartitionCopy(partitionPath, modTime - 1, true)));
    // Registration time equal to the source modtime => don't skip.
    Assert.assertFalse(predicate.apply(createPartitionCopy(partitionPath, modTime, true)));
    // Registration time newer than the source modtime => do skip.
    Assert.assertTrue(predicate.apply(createPartitionCopy(partitionPath, modTime + 1, true)));
    // No target partition at all => don't skip.
    Assert.assertFalse(predicate.apply(createPartitionCopy(partitionPath, modTime + 1, false)));
    // Target partition exists but carries no registration-time annotation => don't skip.
    HivePartitionFileSet unannotated = createPartitionCopy(partitionPath, modTime + 1, true);
    unannotated.getExistingTargetPartition().get().getParameters().clear();
    Assert.assertFalse(predicate.apply(unannotated));
  }

  /**
   * Builds a mocked {@link HivePartitionFileSet} whose source partition lives at
   * {@code location} and whose target partition, when {@code targetPartitionExists},
   * is annotated with the given registration generation time.
   */
  public HivePartitionFileSet createPartitionCopy(Path location, long registrationGenerationTime,
      boolean targetPartitionExists) {
    HivePartitionFileSet fileSet = Mockito.mock(HivePartitionFileSet.class);
    Partition sourcePartition = Mockito.mock(Partition.class);
    Mockito.doReturn(location).when(sourcePartition).getDataLocation();
    Mockito.doReturn(sourcePartition).when(fileSet).getPartition();
    if (!targetPartitionExists) {
      Mockito.doReturn(Optional.absent()).when(fileSet).getExistingTargetPartition();
      return fileSet;
    }
    Partition targetPartition = Mockito.mock(Partition.class);
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(registrationGenerationTime));
    Mockito.doReturn(parameters).when(targetPartition).getParameters();
    Mockito.doReturn(Optional.of(targetPartition)).when(fileSet).getExistingTargetPartition();
    return fileSet;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.predicates;
import com.google.common.base.Optional;
import org.apache.gobblin.data.management.copy.hive.HivePartitionFileSet;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class ExistingPartitionSkipPredicateTest {

  ExistingPartitionSkipPredicate predicate = new ExistingPartitionSkipPredicate();

  @Test
  public void shouldSkipHiveDatasetWithExistingPartition() {
    // A file set whose target partition already exists must be skipped; one without must not.
    HivePartitionFileSet withPartition = mock(HivePartitionFileSet.class);
    when(withPartition.getExistingTargetPartition()).thenReturn(Optional.of(mock(Partition.class)));

    HivePartitionFileSet withoutPartition = mock(HivePartitionFileSet.class);
    when(withoutPartition.getExistingTargetPartition()).thenReturn(Optional.absent());

    Assert.assertTrue(predicate.apply(withPartition));
    Assert.assertFalse(predicate.apply(withoutPartition));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.converter;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import joptsimple.internal.Strings;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
public class UnGzipConverterTest {

  @DataProvider(name = "fileDataProvider")
  public static Object[][] fileDataProvider() {
    // {filePath, expectedText}
    return new Object[][] { { "unGzipConverterTest/archived.tar.gz", "text" }, { "unGzipConverterTest/archived.tgz", "text" } };
  }

  /** Un-gzipping a .tar.gz / .tgz resource must yield the original tarred text content. */
  @Test(dataProvider = "fileDataProvider")
  public void testGz(final String filePath, final String expectedText) throws Exception {
    UnGzipConverter converter = new UnGzipConverter();
    FileSystem fs = FileSystem.getLocal(new Configuration());
    String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
    FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder()
        .file(CopyableFileUtils.getTestCopyableFile(filePath)).inputStream(fs.open(new Path(fullPath))).build();
    Iterable<FileAwareInputStream> iterable =
        converter.convertRecord("outputSchema", fileAwareInputStream, new WorkUnitState());
    String actual = readGzipStreamAsString(Iterables.getFirst(iterable, null).getInputStream());
    Assert.assertEquals(actual.trim(), expectedText);
  }

  /** The converter must strip the trailing .gz / .gzip extension from the destination name. */
  @Test
  public void testExtensionStripping() throws DataConversionException, IOException {
    List<String> helloWorldFiles = ImmutableList.of("helloworld.txt.gzip", "helloworld.txt.gz");
    UnGzipConverter converter = new UnGzipConverter();
    FileSystem fs = FileSystem.getLocal(new Configuration());
    for (String fileName: helloWorldFiles) {
      String filePath = "unGzipConverterTest/" + fileName;
      String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
      FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder()
          .file(CopyableFileUtils.getTestCopyableFile(filePath, "/tmp/" + fileName, null, 0L, null))
          .inputStream(fs.open(new Path(fullPath))).build();
      Iterable<FileAwareInputStream> iterable = converter.convertRecord("outputSchema", fileAwareInputStream, new WorkUnitState());
      FileAwareInputStream out = iterable.iterator().next();
      Assert.assertEquals(out.getFile().getDestination().getName(), "helloworld.txt");
      String contents = IOUtils.toString(out.getInputStream(), StandardCharsets.UTF_8);
      Assert.assertEquals(contents, "helloworld\n");
    }
  }

  /**
   * Reads the first ".txt" entry in the tar stream wrapped around {@code is} and returns its
   * contents as a UTF-8 string; returns the empty string if no such entry exists.
   */
  private static String readGzipStreamAsString(InputStream is) throws Exception {
    // try-with-resources replaces the manual try/finally and guarantees the tar stream is
    // closed even if reading fails. Uses the StandardCharsets constant (already imported)
    // instead of the "UTF-8" string literal, and a plain "" instead of joptsimple's
    // internal Strings.EMPTY.
    try (TarArchiveInputStream tarIn = new TarArchiveInputStream(is)) {
      TarArchiveEntry tarEntry;
      while ((tarEntry = tarIn.getNextTarEntry()) != null) {
        if (tarEntry.isFile() && tarEntry.getName().endsWith(".txt")) {
          return IOUtils.toString(tarIn, StandardCharsets.UTF_8);
        }
      }
    }
    return "";
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.converter;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.UUID;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.jasypt.util.text.BasicTextEncryptor;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Iterables;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
/**
 * Unit tests for {@link DecryptConverter}.
 *
 * <p>The whole class is disabled by default ({@code enabled=false}, group "disabledOnCI"):
 * the GPG case needs external crypto resources, so run these manually when changing
 * {@link DecryptConverter}.
 */
@Test(enabled=false, groups = { "gobblin.data.management.copy.converter", "disabledOnCI" })
public class DecryptConverterTest {

  // Scratch file holding the Jasypt master password; created by setEncryptedPassphrase()
  // and removed again by deleteMasterPwdFile().
  private final File masterPwdFile = new File("masterPwd");

  // Decrypts a GPG-encrypted resource and checks the plaintext round-trips. The GPG
  // passphrase is itself stored encrypted ("ENC(...)") in the work unit state, exercising
  // the converter's master-password lookup path.
  @Test (enabled=false)
  public void testConvertGpgRecord() throws Exception {
    final String expectedFileContents = "123456789";
    final String passphrase = "12";
    DecryptConverter converter = new DecryptConverter();
    WorkUnitState workUnitState = new WorkUnitState();
    try {
      // Writes the throwaway master password to masterPwdFile and stores the encrypted
      // passphrase in the state before the converter initializes.
      setEncryptedPassphrase(passphrase, workUnitState);
      converter.init(workUnitState);
      FileSystem fs = FileSystem.getLocal(new Configuration());
      URL url = getClass().getClassLoader().getResource("decryptConverterTest/decrypt-test.txt.gpg");
      Assert.assertNotNull(url);
      String gpgFilePath = url.getFile();
      try (FSDataInputStream gpgFileInput = fs.open(new Path(gpgFilePath))) {
        FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder()
            .file(CopyableFileUtils.getTestCopyableFile()).inputStream(gpgFileInput).build();
        Iterable<FileAwareInputStream> iterable =
            converter.convertRecord("outputSchema", fileAwareInputStream, workUnitState);
        fileAwareInputStream = Iterables.getFirst(iterable, null);
        Assert.assertNotNull(fileAwareInputStream);
        String actual = IOUtils.toString(fileAwareInputStream.getInputStream(), Charsets.UTF_8);
        Assert.assertEquals(actual, expectedFileContents);
      }
    } finally {
      // Always remove the scratch master-password file and close the converter, even when
      // an assertion above fails.
      deleteMasterPwdFile();
      converter.close();
    }
  }

  // Uses the pluggable "insecure_shift" algorithm instead of GPG and additionally checks
  // that the encryption extension is stripped from the destination file name.
  @Test (enabled=false)
  public void testConvertDifferentEncryption()
      throws IOException, DataConversionException {
    final String expectedFileContents = "2345678";
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.getJobState()
        .setProp("converter.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "insecure_shift");
    try (DecryptConverter converter = new DecryptConverter()) {
      converter.init(workUnitState);
      FileSystem fs = FileSystem.getLocal(new Configuration());
      URL url = getClass().getClassLoader().getResource("decryptConverterTest/decrypt-test.txt.insecure_shift");
      Assert.assertNotNull(url);
      String testFilePath = url.getFile();
      try (FSDataInputStream testFileInput = fs.open(new Path(testFilePath))) {
        FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder()
            .file(CopyableFileUtils.getTestCopyableFile()).inputStream(testFileInput).build();
        fileAwareInputStream.getFile().setDestination(new Path("file:///tmp/decrypt-test.txt.insecure_shift"));
        Iterable<FileAwareInputStream> iterable =
            converter.convertRecord("outputSchema", fileAwareInputStream, workUnitState);
        FileAwareInputStream decryptedStream = Iterables.getFirst(iterable, null);
        Assert.assertNotNull(decryptedStream);
        String actual = IOUtils.toString(decryptedStream.getInputStream(), Charsets.UTF_8);
        Assert.assertEquals(actual, expectedFileContents);
        // The ".insecure_shift" suffix must have been removed from the destination name.
        Assert.assertEquals(decryptedStream.getFile().getDestination().getName(), "decrypt-test.txt");
      }
    }
  }

  // Encrypts plainPassphrase with a random master password, writes that master password to
  // masterPwdFile, and stores the "ENC(...)"-wrapped passphrase in the given state.
  private void setEncryptedPassphrase(String plainPassphrase, State state) throws IOException {
    String masterPassword = UUID.randomUUID().toString();
    createMasterPwdFile(masterPassword);
    state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, this.masterPwdFile.toString());
    state.setProp(ConfigurationKeys.ENCRYPT_USE_STRONG_ENCRYPTOR, false);
    BasicTextEncryptor encryptor = new BasicTextEncryptor();
    encryptor.setPassword(masterPassword);
    String encrypted = encryptor.encrypt(plainPassphrase);
    state.setProp("converter.decrypt.passphrase", "ENC(" + encrypted + ")");
  }

  // Writes the master password to a freshly-created masterPwdFile; fails the test if the
  // file already exists from a previous run.
  private void createMasterPwdFile(String masterPwd) throws IOException {
    Assert.assertTrue(this.masterPwdFile.createNewFile());
    Files.write(masterPwd, this.masterPwdFile, Charset.defaultCharset());
  }

  // Deletes the scratch master-password file, asserting the deletion succeeded.
  private void deleteMasterPwdFile() {
    Assert.assertTrue(this.masterPwdFile.delete());
  }
}
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/publisher/DeletingCopyDataPublisherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.publisher;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.WorkUnitState.WorkingState;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.TestCopyableDataset;
import java.io.File;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Closer;
import com.google.common.io.Files;
@Slf4j
public class DeletingCopyDataPublisherTest {
@Test
public void testDeleteOnSource() throws Exception {
State state = getTestState("testDeleteOnSource");
Path testMethodTempPath = new Path(testClassTempPath, "testDeleteOnSource");
DeletingCopyDataPublisher copyDataPublisher = closer.register(new DeletingCopyDataPublisher(state));
File outputDir = new File(testMethodTempPath.toString(), "task-output/jobid/1f042f494d1fe2198e0e71a17faa233f33b5099b");
outputDir.mkdirs();
outputDir.deleteOnExit();
WorkUnitState wus = new WorkUnitState();
CopyableDataset copyableDataset = new TestCopyableDataset(new Path("origin"));
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(copyableDataset);
CopyEntity cf = CopyableFileUtils.createTestCopyableFile(new Path(testMethodTempPath, "test.txt").toString());
CopySource.serializeCopyableDataset(wus, metadata);
CopySource.serializeCopyEntity(wus, cf);
Assert.assertTrue(fs.exists(new Path(testMethodTempPath, "test.txt")));
// these 2 properties should already be set before the publisher is called
wus.setProp(ConfigurationKeys.WRITER_STAGING_DIR, testMethodTempPath + "/" + ConfigurationKeys.STAGING_DIR_DEFAULT_SUFFIX);
wus.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, testMethodTempPath + "/task-output");
wus.setProp(ConfigurationKeys.JOB_ID_KEY, "jobid");
wus.setWorkingState(WorkingState.SUCCESSFUL);
copyDataPublisher.publishData(ImmutableList.of(wus));
Assert.assertFalse(fs.exists(new Path(testMethodTempPath, "test.txt")));
}
private static final Closer closer = Closer.create();
private FileSystem fs;
private Path testClassTempPath;
@BeforeClass
public void setup() throws Exception {
fs = FileSystem.getLocal(new Configuration());
testClassTempPath =
new Path(Files.createTempDir().getAbsolutePath(), "DeletingCopyDataPublisherTest");
fs.delete(testClassTempPath, true);
log.info("Created a temp directory for CopyDataPublisherTest at " + testClassTempPath);
fs.mkdirs(testClassTempPath);
}
@AfterClass
public void cleanup() {
try {
closer.close();
fs.delete(testClassTempPath, true);
} catch (IOException e) {
log.warn(e.getMessage());
}
}
private State getTestState(String testMethodName) {
return CopyDataPublisherTest.getTestState(testMethodName, testClassTempPath);
}
}
| 2,352 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/publisher/CopyDataPublisherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.publisher;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDataset;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import org.apache.gobblin.data.management.copy.TestCopyableDataset;
import org.apache.gobblin.util.PathUtils;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.common.io.Files;
/*
*
* Test cases covered
* - Single dataset multiple files/workunits
* - Single dataset multiple files/workunits few workunits failed
* - Two datasets multiple files
* - Two datasets one of them failed to publish
* - datasets with overlapping dataset roots
*
*/
@Slf4j
public class CopyDataPublisherTest {

  // Registers every CopyDataPublisher created by the tests so all are released together in cleanup().
  private static final Closer closer = Closer.create();
  private FileSystem fs;
  // Root temp dir for this test class; each test method works in its own subdirectory of it.
  private Path testClassTempPath;

  /** Single dataset with several files/work units: every file must land under the dataset target path. */
  @Test
  public void testPublishSingleDataset() throws Exception {
    State state = getTestState("testPublishSingleDataset");
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/");
    Path testMethodTempPath = new Path(testClassTempPath, "testPublishSingleDataset");
    CopyDataPublisher copyDataPublisher = closer.register(new CopyDataPublisher(state));
    TestDatasetManager datasetManager =
        new TestDatasetManager(testMethodTempPath, state, "datasetTargetPath",
            ImmutableList.of("a/b", "a/c", "d/e"));
    datasetManager.createDatasetFiles();
    datasetManager.verifyDoesntExist();
    copyDataPublisher.publishData(datasetManager.getWorkUnitStates());
    datasetManager.verifyExists();
  }

  /** Two independent datasets published from a single combined collection of work unit states. */
  @Test
  @SuppressWarnings("unchecked")
  public void testPublishMultipleDatasets() throws Exception {
    State state = getTestState("testPublishMultipleDatasets");
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/");
    Path testMethodTempPath = new Path(testClassTempPath, "testPublishMultipleDatasets");
    CopyDataPublisher copyDataPublisher = closer.register(new CopyDataPublisher(state));
    TestDatasetManager dataset1Manager =
        new TestDatasetManager(testMethodTempPath, state, "dataset1TargetPath",
            ImmutableList.of("a/b", "a/c", "d/e"));
    dataset1Manager.createDatasetFiles();
    TestDatasetManager dataset2Manager =
        new TestDatasetManager(testMethodTempPath, state, "dataset2TargetPath",
            ImmutableList.of("a/b", "a/c", "d/e"));
    dataset2Manager.createDatasetFiles();
    dataset1Manager.verifyDoesntExist();
    dataset2Manager.verifyDoesntExist();
    copyDataPublisher.publishData(combine(dataset1Manager.getWorkUnitStates(), dataset2Manager.getWorkUnitStates()));
    dataset1Manager.verifyExists();
    dataset2Manager.verifyExists();
  }

  /** Datasets with overlapping roots (second target nested under the first) must both publish fully. */
  @Test
  @SuppressWarnings("unchecked")
  public void testPublishOverlappingDatasets() throws Exception {
    State state = getTestState("testPublishOverlappingDatasets");
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/");
    Path testMethodTempPath = new Path(testClassTempPath, "testPublishOverlappingDatasets");
    CopyDataPublisher copyDataPublisher = closer.register(new CopyDataPublisher(state));
    TestDatasetManager dataset1Manager =
        new TestDatasetManager(testMethodTempPath, state, "datasetTargetPath", ImmutableList.of("a/b"));
    dataset1Manager.createDatasetFiles();
    TestDatasetManager dataset2Manager =
        new TestDatasetManager(testMethodTempPath, state, "datasetTargetPath/subDir",
            ImmutableList.of("a/c", "d/e"));
    dataset2Manager.createDatasetFiles();
    dataset1Manager.verifyDoesntExist();
    dataset2Manager.verifyDoesntExist();
    copyDataPublisher.publishData(combine(dataset1Manager.getWorkUnitStates(), dataset2Manager.getWorkUnitStates()));
    dataset1Manager.verifyExists();
    dataset2Manager.verifyExists();
  }

  /**
   * The failed dataset never gets writer-output files created (createDatasetFiles is not called for it),
   * so its publish cannot succeed; the successful dataset must still be published on its own.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testPublishDatasetFailure() throws Exception {
    State state = getTestState("testPublishDatasetFailure");
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/");
    Path testMethodTempPath = new Path(testClassTempPath, "testPublishDatasetFailure");
    CopyDataPublisher copyDataPublisher = closer.register(new CopyDataPublisher(state));
    TestDatasetManager successDatasetManager =
        new TestDatasetManager(testMethodTempPath, state, "successTargetPath", ImmutableList.of("a/b"));
    successDatasetManager.createDatasetFiles();
    TestDatasetManager failedDatasetManager =
        new TestDatasetManager(testMethodTempPath, state, "failedTargetPath", ImmutableList.of("c/d"));
    successDatasetManager.verifyDoesntExist();
    failedDatasetManager.verifyDoesntExist();
    copyDataPublisher.publishData(combine(successDatasetManager.getWorkUnitStates(),
        failedDatasetManager.getWorkUnitStates()));
    successDatasetManager.verifyExists();
    failedDatasetManager.verifyDoesntExist();
  }

  /** Creates a clean class-level temp directory on the local filesystem before any test runs. */
  @BeforeClass
  public void setup() throws Exception {
    fs = FileSystem.getLocal(new Configuration());
    testClassTempPath = new Path(Files.createTempDir().getAbsolutePath(), "CopyDataPublisherTest");
    fs.delete(testClassTempPath, true);
    log.info("Created a temp directory for CopyDataPublisherTest at " + testClassTempPath);
    fs.mkdirs(testClassTempPath);
  }

  // Flattens several work-unit-state lists into the single collection that publishData expects.
  private static Collection<? extends WorkUnitState> combine(List<WorkUnitState>... workUnitStateLists) {
    List<WorkUnitState> wus = Lists.newArrayList();
    for (List<WorkUnitState> workUnitStates : workUnitStateLists) {
      wus.addAll(workUnitStates);
    }
    return wus;
  }

  private State getTestState(String testMethodName) {
    return getTestState(testMethodName, testClassTempPath);
  }

  /**
   * Builds a {@link State} with writer output/staging dirs under the given temp path and a fixed job id.
   * Public and static so other test classes can reuse the same setup.
   */
  public static State getTestState(String testMethodName, Path testClassTempPath) {
    Path testMethodPath = new Path(testClassTempPath, testMethodName);
    State state = new State();
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testMethodPath, "task-output"));
    state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testMethodPath, "task-staging"));
    state.setProp(ConfigurationKeys.JOB_ID_KEY, "jobid");
    return state;
  }

  /**
   * Test fixture for one dataset: lays out fake writer-output files for a set of relative paths,
   * produces matching work unit states, and verifies presence/absence of the published files.
   */
  public static class TestDatasetManager {

    private CopyableDataset copyableDataset;
    private CopyableDatasetMetadata metadata;
    // Paths of the dataset's files, relative to the dataset target path.
    private List<String> relativeFilePaths;
    private Path writerOutputPath;
    private Path targetPath;
    private FileSystem fs;
    private CopyEntity copyEntity;
    State state;

    /** Creates empty files under the writer output dir, mirroring where the publisher will look for them. */
    private void createDatasetFiles() throws IOException {
      // Create writer output files
      Path datasetWriterOutputPath =
          new Path(writerOutputPath + "/" + state.getProp(ConfigurationKeys.JOB_ID_KEY), copyEntity.getDatasetAndPartition(this.metadata).identifier());
      Path outputPathWithCurrentDirectory = new Path(datasetWriterOutputPath,
          PathUtils.withoutLeadingSeparator(this.targetPath));
      for (String path : relativeFilePaths) {
        Path pathToCreate = new Path(outputPathWithCurrentDirectory, path);
        fs.mkdirs(pathToCreate.getParent());
        fs.create(pathToCreate);
      }
    }

    public TestDatasetManager(Path testMethodTempPath, State state, String datasetTargetPath, List<String> relativeFilePaths)
        throws IOException {
      this.fs = FileSystem.getLocal(new Configuration());
      this.copyableDataset =
          new TestCopyableDataset(new Path("origin"));
      this.metadata = new CopyableDatasetMetadata(this.copyableDataset);
      this.relativeFilePaths = relativeFilePaths;
      this.writerOutputPath = new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR));
      this.state = state;
      this.targetPath = new Path(testMethodTempPath, datasetTargetPath);
      // Dummy copy entity with placeholder origin/destination; its dataset/partition identifier
      // is what createDatasetFiles() uses to pick the writer-output subdirectory.
      FileStatus file = new FileStatus(0, false, 0, 0, 0, new Path("/file"));
      FileSystem fs = FileSystem.getLocal(new Configuration());
      this.copyEntity = CopyableFile.fromOriginAndDestination(fs, file, new Path("/destination"),
          CopyConfiguration.builder(fs, state.getProperties()).preserve(PreserveAttributes.fromMnemonicString(""))
              .build()).build();
      fs.mkdirs(testMethodTempPath);
      log.info("Created a temp directory for test at " + testMethodTempPath);
    }

    /** Three work unit states, each carrying this dataset's serialized metadata and copy entity. */
    List<WorkUnitState> getWorkUnitStates() throws IOException {
      List<WorkUnitState> workUnitStates =
          Lists.newArrayList(new WorkUnitState(), new WorkUnitState(), new WorkUnitState());
      for (WorkUnitState wus : workUnitStates) {
        wus.addAll(this.state); // propagate job state into work unit state, this is always done in copysource workunit generation
        CopySource.serializeCopyableDataset(wus, metadata);
        CopySource.serializeCopyEntity(wus, this.copyEntity);
      }
      return workUnitStates;
    }

    /** Asserts that every relative file path exists under the dataset target path. */
    void verifyExists() throws IOException {
      for (String fileRelativePath : relativeFilePaths) {
        Path filePublishPath = new Path(this.targetPath, fileRelativePath);
        Assert.assertEquals(fs.exists(filePublishPath), true);
      }
    }

    /** Asserts that none of the relative file paths exist under the dataset target path. */
    void verifyDoesntExist() throws IOException {
      for (String fileRelativePath : relativeFilePaths) {
        Path filePublishPath = new Path(this.targetPath, fileRelativePath);
        Assert.assertEquals(fs.exists(filePublishPath), false);
      }
    }
  }

  /** Releases all registered publishers and removes the class-level temp directory. */
  @AfterClass
  public void cleanup() {
    try {
      closer.close();
      fs.delete(testClassTempPath, true);
    } catch (IOException e) {
      log.warn(e.getMessage());
    }
  }
}
| 2,353 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/recovery/RecoveryHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.recovery;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicates;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import org.apache.gobblin.util.guid.Guid;
public class RecoveryHelperTest {

  private File tmpDir;

  @BeforeMethod
  public void setUp() throws Exception {
    this.tmpDir = Files.createTempDir();
    this.tmpDir.deleteOnExit();
  }

  /** The persist dir is absent until {@code RecoveryHelper.PERSIST_DIR_KEY} is set, then lives under it. */
  @Test
  public void testGetPersistDir() throws Exception {
    State state = new State();
    Assert.assertFalse(RecoveryHelper.getPersistDir(state).isPresent());
    state.setProp(RecoveryHelper.PERSIST_DIR_KEY, this.tmpDir.getAbsolutePath());
    Assert.assertTrue(RecoveryHelper.getPersistDir(state).isPresent());
    Assert.assertTrue(RecoveryHelper.getPersistDir(state).get().toUri().getPath().
        startsWith(this.tmpDir.getAbsolutePath()));
  }

  /**
   * Persisting a file moves it from the staging dir into the recovery dir, preserves its content, and
   * makes it discoverable via findPersistedFile (subject to the supplied predicate).
   */
  @Test
  public void testPersistFile() throws Exception {
    String content = "contents";
    File stagingDir = Files.createTempDir();
    stagingDir.deleteOnExit();
    File file = new File(stagingDir, "file");
    // try-with-resources so the stream is closed even if the write throws (was leaked on failure before)
    try (OutputStream os = new FileOutputStream(file)) {
      IOUtils.write(content, os);
    }
    Assert.assertEquals(stagingDir.listFiles().length, 1);

    State state = new State();
    state.setProp(RecoveryHelper.PERSIST_DIR_KEY, this.tmpDir.getAbsolutePath());
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
    File recoveryDir = new File(RecoveryHelper.getPersistDir(state).get().toUri().getPath());

    FileSystem fs = FileSystem.getLocal(new Configuration());
    CopyableFile copyableFile = CopyableFile.builder(fs,
        new FileStatus(0, false, 0, 0, 0, new Path("/file")), new Path("/dataset"),
        CopyConfiguration.builder(fs, state.getProperties()).preserve(PreserveAttributes.fromMnemonicString("")).build()).build();
    CopySource.setWorkUnitGuid(state, Guid.fromHasGuid(copyableFile));

    RecoveryHelper recoveryHelper = new RecoveryHelper(FileSystem.getLocal(new Configuration()), state);
    recoveryHelper.persistFile(state, copyableFile, new Path(file.getAbsolutePath()));

    // persistFile moves (not copies): staging is now empty and recovery holds exactly one entry
    Assert.assertEquals(stagingDir.listFiles().length, 0);
    Assert.assertEquals(recoveryDir.listFiles().length, 1);

    File fileInRecovery = recoveryDir.listFiles()[0].listFiles()[0];
    // Close the stream after reading (the original FileInputStream was never closed)
    try (FileInputStream in = new FileInputStream(fileInRecovery)) {
      Assert.assertEquals(IOUtils.readLines(in).get(0), content);
    }

    Optional<FileStatus> fileToRecover =
        recoveryHelper.findPersistedFile(state, copyableFile, Predicates.<FileStatus>alwaysTrue());
    Assert.assertTrue(fileToRecover.isPresent());
    Assert.assertEquals(fileToRecover.get().getPath().toUri().getPath(), fileInRecovery.getAbsolutePath());

    // A predicate that rejects everything must hide the persisted file
    fileToRecover =
        recoveryHelper.findPersistedFile(state, copyableFile, Predicates.<FileStatus>alwaysFalse());
    Assert.assertFalse(fileToRecover.isPresent());
  }

  /**
   * purgeOldPersistedFile must delete persisted files older than the configured retention
   * (1 hour here) while keeping fresh ones.
   */
  @Test
  public void testPurge() throws Exception {
    String content = "contents";
    File persistDirBase = Files.createTempDir();
    persistDirBase.deleteOnExit();

    State state = new State();
    state.setProp(RecoveryHelper.PERSIST_DIR_KEY, persistDirBase.getAbsolutePath());
    state.setProp(RecoveryHelper.PERSIST_RETENTION_KEY, "1");
    RecoveryHelper recoveryHelper = new RecoveryHelper(FileSystem.getLocal(new Configuration()), state);

    File persistDir = new File(RecoveryHelper.getPersistDir(state).get().toString());
    persistDir.mkdir();

    // file1 is backdated two hours, beyond the 1-hour retention, so it should be purged
    File file = new File(persistDir, "file1");
    try (OutputStream os = new FileOutputStream(file)) {
      IOUtils.write(content, os);
    }
    file.setLastModified(System.currentTimeMillis() - TimeUnit.HOURS.toMillis(2));

    // file2 is current and must survive the purge
    File file2 = new File(persistDir, "file2");
    try (OutputStream os2 = new FileOutputStream(file2)) {
      IOUtils.write(content, os2);
    }

    Assert.assertEquals(persistDir.listFiles().length, 2);
    recoveryHelper.purgeOldPersistedFile();
    Assert.assertEquals(persistDir.listFiles().length, 1);
  }

  /** shortenPathName replaces separators with '_' and middle-truncates long names with "...". */
  @Test
  public void testShortenPathName() throws Exception {
    Assert.assertEquals(RecoveryHelper.shortenPathName(new Path("/test"), 10), "_test");
    Assert.assertEquals(RecoveryHelper.shortenPathName(new Path("/relatively/long/path"), 9), "_re...ath");
  }
}
| 2,354 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/splitter/DistcpFileSplitterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.splitter;
import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.TestCopyableDataset;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.guid.Guid;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
public class DistcpFileSplitterTest {

  // This test ONLY checks whether the splitter manipulates the properties of the work units correctly
  // (i.e. "merging" them within the collection that was passed in).
  // It does NOT check that the merge in the filesystem has been completed successfully.
  // This requires testing on an HDFS setup.
  @Test
  public void testMergeSplitWorkUnits() throws Exception {
    long mockFileLen = 12L;
    long mockBlockSize = 4L;
    long mockMaxSplitSize = 4L;
    // Effective split size is the max split size rounded down to a whole number of blocks.
    long expectedSplitSize = (mockMaxSplitSize / mockBlockSize) * mockBlockSize;
    // One split per full chunk, plus a final tail split.
    int expectedSplits = (int) (mockFileLen / expectedSplitSize + 1);
    FileSystem fs = mock(FileSystem.class);
    List<WorkUnitState> splitWorkUnits =
        createMockSplitWorkUnits(fs, mockFileLen, mockBlockSize, mockMaxSplitSize).stream()
            .map(wu -> new WorkUnitState(wu)).collect(Collectors.toList());
    Assert.assertEquals(splitWorkUnits.size(), expectedSplits);
    // All splits of the same file should collapse back into a single merged work unit.
    Collection<WorkUnitState> mergedWorkUnits = DistcpFileSplitter.mergeAllSplitWorkUnits(fs, splitWorkUnits);
    Assert.assertEquals(mergedWorkUnits.size(), 1);
  }

  // This test checks whether a work unit has been successfully set up for a split,
  // but does not check that the split is actually done correctly when input streams are used.
  @Test
  public void testSplitFile() throws Exception {
    long mockFileLen = 12L;
    long mockBlockSize = 4L;
    long mockMaxSplitSize = 4L;
    long expectedSplitSize = (mockMaxSplitSize / mockBlockSize) * mockBlockSize;
    int expectedSplits = (int) (mockFileLen / expectedSplitSize + 1);
    FileSystem fs = mock(FileSystem.class);
    Collection<WorkUnit> splitWorkUnits = createMockSplitWorkUnits(fs, mockFileLen, mockBlockSize, mockMaxSplitSize);
    Assert.assertEquals(splitWorkUnits.size(), expectedSplits);
    // Each work unit must carry a distinct split number whose low/high positions are aligned to the
    // split size; exactly one split is marked as the last and ends at the file length.
    Set<Integer> splitNums = new HashSet<>();
    boolean hasLastSplit = false;
    for (WorkUnit wu : splitWorkUnits) {
      Optional<DistcpFileSplitter.Split> split = DistcpFileSplitter.getSplit(wu);
      Assert.assertTrue(split.isPresent());
      Assert.assertEquals(split.get().getTotalSplits(), expectedSplits);
      int splitNum = split.get().getSplitNumber();
      Assert.assertFalse(splitNums.contains(splitNum));
      splitNums.add(splitNum);
      Assert.assertEquals(split.get().getLowPosition(), expectedSplitSize * splitNum);
      if (split.get().isLastSplit()) {
        hasLastSplit = true;
        Assert.assertEquals(split.get().getHighPosition(), mockFileLen);
      } else {
        Assert.assertEquals(split.get().getHighPosition(), expectedSplitSize * (splitNum + 1));
      }
    }
    Assert.assertTrue(hasLastSplit);
  }

  /**
   * Builds split work units for a mocked file of the given length/block size by driving
   * {@link DistcpFileSplitter#splitFile} with a spied CopyableFile and a template work unit.
   */
  private Collection<WorkUnit> createMockSplitWorkUnits(FileSystem fs, long fileLen, long blockSize, long maxSplitSize)
      throws Exception {
    FileStatus file = mock(FileStatus.class);
    when(file.getLen()).thenReturn(fileLen);
    when(file.getBlockSize()).thenReturn(blockSize);
    URI uri = new URI("hdfs", "dummyhost", "/test", "test");
    Path path = new Path(uri);
    when(fs.getUri()).thenReturn(uri);
    CopyableDatasetMetadata cdm = new CopyableDatasetMetadata(new TestCopyableDataset(path));
    CopyableFile cf = CopyableFileUtils.getTestCopyableFile();
    // Spy so file status, block size and destination come from the mocks above instead of real I/O.
    CopyableFile spy = spy(cf);
    doReturn(file).when(spy).getFileStatus();
    doReturn(blockSize).when(spy).getBlockSize(any(FileSystem.class));
    doReturn(path).when(spy).getDestination();
    WorkUnit wu = WorkUnit.createEmpty();
    wu.setProp(DistcpFileSplitter.MAX_SPLIT_SIZE_KEY, maxSplitSize);
    wu.setProp(ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_OUTPUT_DIR, 1, 0),
        path.toString());
    CopySource.setWorkUnitGuid(wu, Guid.fromStrings(wu.toString()));
    CopySource.serializeCopyEntity(wu, cf);
    CopySource.serializeCopyableDataset(wu, cdm);
    return DistcpFileSplitter.splitFile(spy, wu, fs);
  }
}
| 2,355 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/extractor/InputStreamExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.extractor;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
public class InputStreamExtractorTest {

  /**
   * readRecord should return a FileAwareInputStream whose origin matches the copyable file and whose
   * stream yields the file's content; a second call (end of data) returns null.
   */
  @Test
  public void testReadRecord() throws Exception {
    CopyableFile file = getTestCopyableFile("inputStreamExtractorTest/first.txt");
    FileAwareInputStreamExtractor extractor =
        new FileAwareInputStreamExtractor(FileSystem.getLocal(new Configuration()), file);

    FileAwareInputStream fileAwareInputStream = extractor.readRecord(null);
    Assert.assertEquals(fileAwareInputStream.getFile().getOrigin().getPath(), file.getOrigin().getPath());
    // Close the record's stream once read (it was leaked in the original test).
    try (InputStream is = fileAwareInputStream.getInputStream()) {
      Assert.assertEquals(IOUtils.toString(is), "first");
    }

    // The extractor wraps a single file, so the next read signals end-of-data.
    Assert.assertNull(extractor.readRecord(null));
  }

  /**
   * Builds a CopyableFile backed by a classpath test resource on the local filesystem.
   *
   * @param resourcePath classpath-relative path of the resource file to wrap
   */
  private CopyableFile getTestCopyableFile(String resourcePath) throws IOException {
    String filePath = getClass().getClassLoader().getResource(resourcePath).getFile();
    // 0L literals (not lowercase "0l"); an all-zero FileStatus is sufficient for these assertions.
    FileStatus status = new FileStatus(0L, false, 0, 0L, 0L, new Path(filePath));
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
    return CopyableFile.fromOriginAndDestination(FileSystem.getLocal(new Configuration()), status,
        new Path("/destination"), CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties)
            .preserve(PreserveAttributes.fromMnemonicString("")).build()).build();
  }
}
| 2,356 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/iceberg/IcebergDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.testng.collections.Sets;
import com.google.api.client.util.Maps;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import joptsimple.internal.Strings;
import lombok.Data;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyContext;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import static org.mockito.Mockito.any;
/** Tests for {@link org.apache.gobblin.data.management.copy.iceberg.IcebergDataset} */
public class IcebergDatasetTest {
private static final URI SRC_FS_URI;
private static final URI DEST_FS_URI;
static {
try {
SRC_FS_URI = new URI("abc", "the.source.org", "/", null);
DEST_FS_URI = new URI("xyz", "the.dest.org", "/", null);
} catch (URISyntaxException e) {
throw new RuntimeException("should not occur!", e);
}
}
private static final String ROOT_PATH = "/root/iceberg/test/";
private static final String METADATA_PATH = ROOT_PATH + "metadata/metadata.json";
private static final String MANIFEST_LIST_PATH_0 = ROOT_PATH + "metadata/manifest_list.x";
private static final String MANIFEST_PATH_0 = ROOT_PATH + "metadata/manifest.a";
private static final String MANIFEST_DATA_PATH_0A = ROOT_PATH + "data/p0/a";
private static final String MANIFEST_DATA_PATH_0B = ROOT_PATH + "data/p0/b";
private static final MockIcebergTable.SnapshotPaths SNAPSHOT_PATHS_0 =
new MockIcebergTable.SnapshotPaths(Optional.of(METADATA_PATH), MANIFEST_LIST_PATH_0, Arrays.asList(
new IcebergSnapshotInfo.ManifestFileInfo(MANIFEST_PATH_0,
Arrays.asList(MANIFEST_DATA_PATH_0A, MANIFEST_DATA_PATH_0B))));
private static final String MANIFEST_LIST_PATH_1 = MANIFEST_LIST_PATH_0.replaceAll("\\.x$", ".y");
private static final String MANIFEST_PATH_1 = MANIFEST_PATH_0.replaceAll("\\.a$", ".b");
private static final String MANIFEST_DATA_PATH_1A = MANIFEST_DATA_PATH_0A.replaceAll("/p0/", "/p1/");
private static final String MANIFEST_DATA_PATH_1B = MANIFEST_DATA_PATH_0B.replaceAll("/p0/", "/p1/");
private static final MockIcebergTable.SnapshotPaths SNAPSHOT_PATHS_1 =
new MockIcebergTable.SnapshotPaths(Optional.empty(), MANIFEST_LIST_PATH_1, Arrays.asList(
new IcebergSnapshotInfo.ManifestFileInfo(MANIFEST_PATH_1,
Arrays.asList(MANIFEST_DATA_PATH_1A, MANIFEST_DATA_PATH_1B))));
private final String testDbName = "test_db_name";
private final String testTblName = "test_tbl_name";
public static final String SRC_CATALOG_URI = "abc://the.source.org/catalog";
private final Properties copyConfigProperties = new Properties();
@BeforeClass
public void setUp() throws Exception {
copyConfigProperties.setProperty("data.publisher.final.dir", "/test");
}
@Test
public void testGetFilePathsWhenDestEmpty() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList();
Set<Path> expectedResultPaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
}
@Test
public void testGetFilePathsWhenOneManifestListAtDest() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList(MANIFEST_LIST_PATH_1);
Set<Path> expectedResultPaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_0);
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
}
@Test
public void testGetFilePathsWhenOneManifestAtDest() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList(MANIFEST_PATH_1);
Set<Path> expectedResultPaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_0);
expectedResultPaths.add(new Path(MANIFEST_LIST_PATH_1)); // expect manifest's parent, despite manifest subtree skip
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
}
@Test
public void testGetFilePathsWhenSomeDataFilesAtDest() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList(MANIFEST_DATA_PATH_1B, MANIFEST_DATA_PATH_0A);
Set<Path> expectedResultPaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
// despite already existing on target, expect anyway: per-file check skipped for optimization's sake
// expectedResultPaths.remove(new Path(MANIFEST_DATA_PATH_1B));
// expectedResultPaths.remove(new Path(MANIFEST_DATA_PATH_0A));
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
}
@Test
public void testGetFilePathsWillSkipMissingSourceFile() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
// pretend this path doesn't exist on source:
Path missingPath = new Path(MANIFEST_DATA_PATH_0A);
Set<Path> existingSourcePaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
existingSourcePaths.remove(missingPath);
List<String> existingDestPaths = Lists.newArrayList(MANIFEST_LIST_PATH_1);
Set<Path> expectedResultPaths = withAllSnapshotPaths(Sets.newHashSet(), SNAPSHOT_PATHS_0);
expectedResultPaths.remove(missingPath);
validateGetFilePathsGivenDestState(icebergSnapshots,
Optional.of(existingSourcePaths.stream().map(Path::toString).collect(Collectors.toList())), existingDestPaths,
expectedResultPaths);
}
@Test
public void testGetFilePathsWhenManifestListsAtDestButNotMetadata() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList(MANIFEST_LIST_PATH_1, MANIFEST_LIST_PATH_0);
Set<Path> expectedResultPaths = Sets.newHashSet();
expectedResultPaths.add(new Path(METADATA_PATH));
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
}
@Test
public void testGetFilePathsWhenAllAtDest() throws IOException {
List<MockIcebergTable.SnapshotPaths> icebergSnapshots = Lists.newArrayList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0);
List<String> existingDestPaths = Lists.newArrayList(METADATA_PATH, MANIFEST_LIST_PATH_1, MANIFEST_LIST_PATH_0);
Set<Path> expectedResultPaths = Sets.newHashSet(); // not expecting any delta
IcebergTable mockTable =
validateGetFilePathsGivenDestState(icebergSnapshots, existingDestPaths, expectedResultPaths);
// ensure short-circuiting was able to avert iceberg manifests scan
Mockito.verify(mockTable, Mockito.times(1)).getCurrentSnapshotInfoOverviewOnly();
Mockito.verifyNoMoreInteractions(mockTable);
}
/** Exception wrapping is used internally--ensure that doesn't lapse into silently swallowing errors */
@Test(expectedExceptions = IOException.class)
public void testGetFilePathsDoesNotSwallowDestFileSystemException() throws IOException {
  IcebergTable srcTable = MockIcebergTable.withSnapshots(Lists.newArrayList(SNAPSHOT_PATHS_0));
  FileSystem sourceFs = new MockFileSystemBuilder(SRC_FS_URI).build();
  IcebergDataset dataset = new IcebergDataset(testDbName, testTblName, srcTable, null, new Properties(), sourceFs);
  FileSystem destFs = new MockFileSystemBuilder(DEST_FS_URI).build();
  // dest FS blows up when probed for the manifest list; that IOException must surface to the caller
  Mockito.doThrow(new IOException("Ha - not so fast!")).when(destFs).getFileStatus(new Path(SNAPSHOT_PATHS_0.manifestListPath));
  dataset.getFilePathsToFileStatus(destFs, createEmptyCopyConfiguration(destFs));
}
/** Validate error consolidation used to streamline logging. */
@Test
public void testPathErrorConsolidator() {
  IcebergDataset.PathErrorConsolidator pec = IcebergDataset.createPathErrorConsolidator();
  // first error under a given parent dir logs the full path
  Optional<String> msg0 = pec.prepLogMsg(new Path("/a/b/c/file0"));
  Assert.assertTrue(msg0.isPresent());
  Assert.assertEquals(msg0.get(), "path not found: '/a/b/c/file0'");
  // second error under the same parent logs once more, but consolidated to the dir ('.../...')
  Optional<String> msg1 = pec.prepLogMsg(new Path("/a/b/c/file1"));
  Assert.assertTrue(msg1.isPresent());
  Assert.assertEquals(msg1.get(), "paths not found: '/a/b/c/...'");
  // third (and subsequent) errors under the same parent are suppressed entirely
  Optional<String> msg2 = pec.prepLogMsg(new Path("/a/b/c/file2"));
  Assert.assertFalse(msg2.isPresent());
  // a different parent dir starts its own consolidation sequence afresh
  Optional<String> msg3 = pec.prepLogMsg(new Path("/a/b/c-other/file0"));
  Assert.assertTrue(msg3.isPresent());
}
/**
 * Test case to generate copy entities for all the file paths for a mocked iceberg table.
 * The assumption here is that we create copy entities for all the matching file paths,
 * without calculating any difference between the source and destination
 */
@Test
public void testGenerateCopyEntitiesWhenDestEmpty() throws IOException {
  // every file of the single-snapshot table: metadata, manifest list, manifest, and data files
  List<String> expectedPaths = Arrays.asList(METADATA_PATH, MANIFEST_LIST_PATH_0,
      MANIFEST_PATH_0, MANIFEST_DATA_PATH_0A, MANIFEST_DATA_PATH_0B);
  MockFileSystemBuilder sourceBuilder = new MockFileSystemBuilder(SRC_FS_URI);
  sourceBuilder.addPaths(expectedPaths);
  FileSystem sourceFs = sourceBuilder.build();
  IcebergTable srcIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_0));
  IcebergTable destIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_1));
  // `TrickIcebergDataset` pins the mocked source FS (see that class's javadoc)
  IcebergDataset icebergDataset =
      new TrickIcebergDataset(testDbName, testTblName, srcIcebergTable, destIcebergTable, new Properties(), sourceFs);
  MockFileSystemBuilder destBuilder = new MockFileSystemBuilder(DEST_FS_URI); // dest: no paths added == empty
  FileSystem destFs = destBuilder.build();
  CopyConfiguration copyConfiguration =
      CopyConfiguration.builder(destFs, copyConfigProperties).preserve(PreserveAttributes.fromMnemonicString(""))
          .copyContext(new CopyContext()).build();
  Collection<CopyEntity> copyEntities = icebergDataset.generateCopyEntities(destFs, copyConfiguration);
  verifyCopyEntities(copyEntities, expectedPaths);
}
/** Test generating copy entities for a multi-snapshot iceberg; given empty dest, src-dest delta will be entirety */
@Test
public void testGenerateCopyEntitiesMultiSnapshotWhenDestEmpty() throws IOException {
  // full file set across both snapshots: shared metadata plus each snapshot's manifests and data
  List<String> allSourcePaths = Arrays.asList(METADATA_PATH,
      MANIFEST_LIST_PATH_0, MANIFEST_PATH_0, MANIFEST_DATA_PATH_0A, MANIFEST_DATA_PATH_0B,
      MANIFEST_LIST_PATH_1, MANIFEST_PATH_1, MANIFEST_DATA_PATH_1A, MANIFEST_DATA_PATH_1B);
  MockFileSystemBuilder srcFsBuilder = new MockFileSystemBuilder(SRC_FS_URI);
  srcFsBuilder.addPaths(allSourcePaths);
  FileSystem sourceFs = srcFsBuilder.build();
  IcebergTable srcTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_1, SNAPSHOT_PATHS_0));
  IcebergTable destTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_1));
  IcebergDataset dataset =
      new TrickIcebergDataset(testDbName, testTblName, srcTable, destTable, new Properties(), sourceFs);
  // destination FS with no registered paths, i.e. empty
  FileSystem destFs = new MockFileSystemBuilder(DEST_FS_URI).build();
  CopyConfiguration copyConfig = CopyConfiguration.builder(destFs, copyConfigProperties)
      .preserve(PreserveAttributes.fromMnemonicString(""))
      .copyContext(new CopyContext())
      .build();
  // with an empty destination, the delta to copy is the table's entire file set
  verifyCopyEntities(dataset.generateCopyEntities(destFs, copyConfig), allSourcePaths);
}
@Test
public void testFsOwnershipAndPermissionPreservationWhenDestEmpty() throws IOException {
  // distinct owner/group/permission triples per file kind, to prove each is carried through independently
  FileStatus metadataFileStatus = new FileStatus(0, false, 0, 0, 0, 0, new FsPermission(FsAction.WRITE, FsAction.READ, FsAction.NONE), "metadata_owner", "metadata_group", null);
  FileStatus manifestFileStatus = new FileStatus(0, false, 0, 0, 0, 0, new FsPermission(FsAction.WRITE, FsAction.READ, FsAction.NONE), "manifest_list_owner", "manifest_list_group", null);
  FileStatus manifestDataFileStatus = new FileStatus(0, false, 0, 0, 0, 0, new FsPermission(FsAction.WRITE_EXECUTE, FsAction.READ_EXECUTE, FsAction.NONE), "manifest_data_owner", "manifest_data_group", null);
  Map<String, FileStatus> expectedPathsAndFileStatuses = Maps.newHashMap();
  expectedPathsAndFileStatuses.put(METADATA_PATH, metadataFileStatus);
  expectedPathsAndFileStatuses.put(MANIFEST_PATH_0, manifestFileStatus);
  expectedPathsAndFileStatuses.put(MANIFEST_LIST_PATH_0, manifestFileStatus);
  expectedPathsAndFileStatuses.put(MANIFEST_DATA_PATH_0A, manifestDataFileStatus);
  expectedPathsAndFileStatuses.put(MANIFEST_DATA_PATH_0B, manifestDataFileStatus);
  MockFileSystemBuilder sourceBuilder = new MockFileSystemBuilder(SRC_FS_URI);
  sourceBuilder.addPathsAndFileStatuses(expectedPathsAndFileStatuses);
  FileSystem sourceFs = sourceBuilder.build();
  IcebergTable srcIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_0));
  IcebergTable destIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_1));
  IcebergDataset icebergDataset = new TrickIcebergDataset(testDbName, testTblName, srcIcebergTable, destIcebergTable, new Properties(), sourceFs);
  MockFileSystemBuilder destBuilder = new MockFileSystemBuilder(DEST_FS_URI); // dest: empty
  FileSystem destFs = destBuilder.build();
  CopyConfiguration copyConfiguration =
      CopyConfiguration.builder(destFs, copyConfigProperties)
          // preserving attributes for owner, group and permissions respectively
          .preserve(PreserveAttributes.fromMnemonicString("ugp"))
          .copyContext(new CopyContext()).build();
  Collection<CopyEntity> copyEntities = icebergDataset.generateCopyEntities(destFs, copyConfiguration);
  // note: verification consults the builder's map (which includes auto-added ancestor dirs)
  verifyFsOwnershipAndPermissionPreservation(copyEntities, sourceBuilder.getPathsAndFileStatuses());
}
@Test
public void testFsOwnershipAndPermissionWithoutPreservationWhenDestEmpty() throws IOException {
  List<String> expectedPaths = Arrays.asList(METADATA_PATH, MANIFEST_LIST_PATH_0,
      MANIFEST_PATH_0, MANIFEST_DATA_PATH_0A, MANIFEST_DATA_PATH_0B);
  // with no preservation requested, every path should come through with default (blank) `FileStatus` attrs
  Map<Path, FileStatus> expectedPathsAndFileStatuses = Maps.newHashMap();
  for (String expectedPath : expectedPaths) {
    expectedPathsAndFileStatuses.putIfAbsent(new Path(expectedPath), new FileStatus());
  }
  MockFileSystemBuilder sourceBuilder = new MockFileSystemBuilder(SRC_FS_URI);
  sourceBuilder.addPaths(expectedPaths);
  FileSystem sourceFs = sourceBuilder.build();
  IcebergTable srcIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_0));
  IcebergTable destIcebergTable = MockIcebergTable.withSnapshots(Arrays.asList(SNAPSHOT_PATHS_1));
  IcebergDataset icebergDataset = new TrickIcebergDataset(testDbName, testTblName, srcIcebergTable, destIcebergTable, new Properties(), sourceFs);
  MockFileSystemBuilder destBuilder = new MockFileSystemBuilder(DEST_FS_URI); // dest: empty
  FileSystem destFs = destBuilder.build();
  CopyConfiguration copyConfiguration =
      CopyConfiguration.builder(destFs, copyConfigProperties)
          // without preserving attributes for owner, group and permissions
          .preserve(PreserveAttributes.fromMnemonicString(""))
          .copyContext(new CopyContext()).build();
  Collection<CopyEntity> copyEntities = icebergDataset.generateCopyEntities(destFs, copyConfiguration);
  verifyFsOwnershipAndPermissionPreservation(copyEntities, expectedPathsAndFileStatuses);
}
/**
 * exercise {@link IcebergDataset::getFilePaths} and validate the result
 * (convenience overload: `Optional.empty()` source paths means "every source path exists")
 * @return {@link IcebergTable} (mock!), for behavioral verification
 */
protected IcebergTable validateGetFilePathsGivenDestState(List<MockIcebergTable.SnapshotPaths> sourceSnapshotPathSets,
    List<String> existingDestPaths, Set<Path> expectedResultPaths) throws IOException {
  return validateGetFilePathsGivenDestState(sourceSnapshotPathSets, Optional.empty(), existingDestPaths,
      expectedResultPaths);
}
/**
 * exercise {@link IcebergDataset::getFilePaths} and validate the result
 * @param optExistingSourcePaths when absent, the mock source FS reports every path as existing;
 *        else only the listed paths do
 * @return {@link IcebergTable} (mock!), for behavioral verification
 */
protected IcebergTable validateGetFilePathsGivenDestState(List<MockIcebergTable.SnapshotPaths> sourceSnapshotPathSets,
    Optional<List<String>> optExistingSourcePaths, List<String> existingDestPaths, Set<Path> expectedResultPaths) throws IOException {
  IcebergTable icebergTable = MockIcebergTable.withSnapshots(sourceSnapshotPathSets);
  // "represent every path" iff no explicit source paths were given
  MockFileSystemBuilder sourceFsBuilder = new MockFileSystemBuilder(SRC_FS_URI, !optExistingSourcePaths.isPresent());
  optExistingSourcePaths.ifPresent(sourceFsBuilder::addPaths);
  FileSystem sourceFs = sourceFsBuilder.build();
  IcebergDataset icebergDataset =
      new IcebergDataset(testDbName, testTblName, icebergTable, null, new Properties(), sourceFs);
  MockFileSystemBuilder destFsBuilder = new MockFileSystemBuilder(DEST_FS_URI);
  destFsBuilder.addPaths(existingDestPaths);
  FileSystem destFs = destFsBuilder.build();
  CopyConfiguration copyConfiguration = createEmptyCopyConfiguration(destFs);
  Map<Path, FileStatus> filePathsToFileStatus = icebergDataset.getFilePathsToFileStatus(destFs, copyConfiguration);
  Assert.assertEquals(filePathsToFileStatus.keySet(), expectedResultPaths);
  // verify solely the path portion of the `FileStatus`, since that's all mock sets up
  Assert.assertEquals(
      filePathsToFileStatus.values().stream().map(FileStatus::getPath).collect(Collectors.toSet()),
      expectedResultPaths);
  return icebergTable;
}
/** @return `paths` after adding to it all paths of every one of `snapshotDefs` */
protected static Set<Path> withAllSnapshotPaths(Set<Path> paths, MockIcebergTable.SnapshotPaths... snapshotDefs) {
  for (MockIcebergTable.SnapshotPaths snapshotDef : snapshotDefs) {
    // every path of the snapshot: metadata, manifest list, manifests, and data files
    for (String pathString : snapshotDef.asSnapshotInfo().getAllPaths()) {
      paths.add(new Path(pathString));
    }
  }
  return paths;
}
/** @return a minimal {@link CopyConfiguration} (no preservation attrs) against `fs` */
private CopyConfiguration createEmptyCopyConfiguration(FileSystem fs) {
  return CopyConfiguration.builder(fs, copyConfigProperties).copyContext(new CopyContext()).build();
}
/**
 * Verify that `copyEntities` contains a {@link org.apache.gobblin.data.management.copy.CopyableFile}
 * for exactly the `expected` file paths (order-insensitive), and that every non-file entity
 * is the expected post-publish (register) step.
 */
private static void verifyCopyEntities(Collection<CopyEntity> copyEntities, List<String> expected) {
  List<String> actual = new ArrayList<>();
  for (CopyEntity copyEntity : copyEntities) {
    String json = copyEntity.toString();
    if (isCopyableFile(json)) {
      actual.add(CopyEntityDeserializer.getFilePathAsStringFromJson(json));
    } else {
      verifyPostPublishStep(json);
    }
  }
  // explicit size check first, with a message that (accurately) shows both lists on mismatch
  // (previously mislabeled the two Lists as "Set"s)
  Assert.assertEquals(actual.size(), expected.size(), "copied file paths " + actual + " vs expected " + expected);
  Assert.assertEqualsNoOrder(actual.toArray(), expected.toArray());
}
/** @return whether the serialized entity json declares itself a gobblin `CopyableFile` */
private static boolean isCopyableFile(String json) {
  JsonObject root = new Gson().fromJson(json, JsonObject.class);
  String objectType = root.getAsJsonPrimitive("object-type").getAsString();
  return objectType.equals("org.apache.gobblin.data.management.copy.CopyableFile");
}
/**
 * For each `CopyableFile` among `copyEntities`, verify its destination owner/group/permissions and
 * those of its ancestor dirs against `expectedPathsAndFileStatuses`; non-file entities are expected
 * to be the post-publish (register) step.
 */
private static void verifyFsOwnershipAndPermissionPreservation(Collection<CopyEntity> copyEntities, Map<Path, FileStatus> expectedPathsAndFileStatuses) {
  for (CopyEntity copyEntity : copyEntities) {
    String copyEntityJson = copyEntity.toString();
    if (isCopyableFile(copyEntityJson)) {
      List<CopyEntityDeserializer.FileOwnerAndPermissions> ancestorFileOwnerAndPermissionsList =
          CopyEntityDeserializer.getAncestorOwnerAndPermissions(copyEntityJson);
      CopyEntityDeserializer.FileOwnerAndPermissions destinationFileOwnerAndPermissions = CopyEntityDeserializer.getDestinationOwnerAndPermissions(copyEntityJson);
      Path filePath = new Path(CopyEntityDeserializer.getFilePathAsStringFromJson(copyEntityJson));
      FileStatus fileStatus = expectedPathsAndFileStatuses.get(filePath);
      verifyFileStatus(destinationFileOwnerAndPermissions, fileStatus);
      // providing path's parent to verify ancestor owner and permissions
      verifyAncestorPermissions(ancestorFileOwnerAndPermissionsList, filePath.getParent(),
          expectedPathsAndFileStatuses);
    } else {
      verifyPostPublishStep(copyEntityJson);
    }
  }
}
/** Verify `actual` (deserialized from entity json) against `expected`'s owner, group, and u/g/o permission actions */
private static void verifyFileStatus(CopyEntityDeserializer.FileOwnerAndPermissions actual, FileStatus expected) {
  Assert.assertEquals(actual.owner, expected.getOwner());
  Assert.assertEquals(actual.group, expected.getGroup());
  // permissions compared as `FsAction` names (e.g. "READ_WRITE"), matching the deserializer's representation
  Assert.assertEquals(actual.userActionPermission, expected.getPermission().getUserAction().toString());
  Assert.assertEquals(actual.groupActionPermission, expected.getPermission().getGroupAction().toString());
  Assert.assertEquals(actual.otherActionPermission, expected.getPermission().getOtherAction().toString());
}
/**
 * Verify owner/permissions of each successive ancestor dir, starting at `path` and climbing one
 * parent per element of `actualList`; ancestors absent from the map compare against a blank `FileStatus`
 */
private static void verifyAncestorPermissions(List<CopyEntityDeserializer.FileOwnerAndPermissions> actualList, Path path, Map<Path, FileStatus> pathFileStatusMap) {
  Path currentAncestor = path;
  for (CopyEntityDeserializer.FileOwnerAndPermissions actual : actualList) {
    FileStatus expected = pathFileStatusMap.getOrDefault(currentAncestor, new FileStatus());
    verifyFileStatus(actual, expected);
    currentAncestor = currentAncestor.getParent();
  }
}
/** Assert the serialized entity json is the expected post-publish commit step (iceberg registration) */
private static void verifyPostPublishStep(String json) {
  String actualCommitStep = new Gson().fromJson(json, JsonObject.class)
      .getAsJsonObject("object-data")
      .getAsJsonObject("step")
      .getAsJsonPrimitive("object-type")
      .getAsString();
  Assert.assertEquals(actualCommitStep, "org.apache.gobblin.data.management.copy.iceberg.IcebergRegisterStep");
}
/**
 * Sadly, this is needed to avoid losing `FileSystem` mock to replacement from the `FileSystem.get` `static`
 * Without this, so to lose the mock, we'd be unable to set up any source paths as existing.
 */
protected static class TrickIcebergDataset extends IcebergDataset {
  public TrickIcebergDataset(String db, String table, IcebergTable srcIcebergTbl, IcebergTable destIcebergTbl, Properties properties,
      FileSystem sourceFs) {
    super(db, table, srcIcebergTbl, destIcebergTbl, properties, sourceFs);
  }

  /** Return the already-mocked source FS verbatim, rather than re-resolving via the (un-mockable) static */
  @Override // as the `static` is not mock-able
  protected FileSystem getSourceFileSystemFromFileStatus(FileStatus fileStatus, Configuration hadoopConfig) throws IOException {
    return this.sourceFs;
  }
}
/** Builds a {@link FileSystem} mock */
protected static class MockFileSystemBuilder {
  private final URI fsURI;
  /** when not `.isPresent()`, all paths exist; when `.get().isEmpty()`, none exist; else only those indicated do */
  private final Optional<Map<Path, FileStatus>> optPathsWithFileStatuses;
  public MockFileSystemBuilder(URI fsURI) {
    this(fsURI, false);
  }
  /** @param shouldRepresentEveryPath when true, the built FS reports every path as existing (no paths may be added) */
  public MockFileSystemBuilder(URI fsURI, boolean shouldRepresentEveryPath) {
    this.fsURI = fsURI;
    this.optPathsWithFileStatuses = shouldRepresentEveryPath ? Optional.empty() : Optional.of(Maps.newHashMap());
  }
  /** Register `pathStrings` as existing; a `null` status elicits an auto-created empty one at `build()` */
  public void addPaths(List<String> pathStrings) {
    Map<String, FileStatus> map = Maps.newHashMap();
    for (String pathString : pathStrings) {
      map.putIfAbsent(pathString, null);
    }
    addPathsAndFileStatuses(map);
  }
  /** Register every (path, status) entry of `pathAndFileStatuses` as existing */
  public void addPathsAndFileStatuses(Map<String, FileStatus> pathAndFileStatuses) {
    for (Map.Entry<String, FileStatus> entry : pathAndFileStatuses.entrySet()) {
      String pathString = entry.getKey();
      FileStatus fileStatus = entry.getValue();
      addPathsAndFileStatuses(pathString, fileStatus);
    }
  }
  /** Register `pathString` as existing with `fileStatus` (whose path is set to match, when non-null) */
  public void addPathsAndFileStatuses(String pathString, FileStatus fileStatus) {
    Path path = new Path(pathString);
    if(fileStatus != null) { fileStatus.setPath(path);}
    addPathAndFileStatus(path, fileStatus);
  }
  /**
   * Register `path` as existing with `fileStatus`, recursively registering every ancestor dir as well.
   * NOTE(review): ancestors are registered with the *same* `fileStatus` object (first-writer-wins via
   * `putIfAbsent`) — presumably acceptable for these tests; confirm if ancestor attrs ever matter.
   */
  public void addPathAndFileStatus(Path path, FileStatus fileStatus) {
    if (!this.optPathsWithFileStatuses.isPresent()) {
      throw new IllegalStateException("unable to add paths and file statuses when constructed");
    }
    optPathsWithFileStatuses.get().putIfAbsent(path, fileStatus);
    if (!path.isRoot()) { // recursively add ancestors of a previously unknown path
      addPathAndFileStatus(path.getParent(), fileStatus);
    }
  }
  /** @return the registered (path, status) map; throws if built with `shouldRepresentEveryPath` */
  public Map<Path, FileStatus> getPathsAndFileStatuses() {
    return optPathsWithFileStatuses.get();
  }
  /** @return the mock `FileSystem`: `getFileStatus` answers per the registered paths (or for all paths, when so constructed) */
  public FileSystem build()
      throws IOException {
    FileSystem fs = Mockito.mock(FileSystem.class);
    Mockito.when(fs.getUri()).thenReturn(fsURI);
    Mockito.when(fs.makeQualified(any(Path.class)))
        .thenAnswer(invocation -> invocation.getArgument(0, Path.class).makeQualified(fsURI, new Path("/")));
    if (!this.optPathsWithFileStatuses.isPresent()) {
      // "every path exists" mode: fabricate an empty status for whatever is asked
      Mockito.when(fs.getFileStatus(any(Path.class)))
          .thenAnswer(invocation -> createEmptyFileStatus(invocation.getArgument(0, Path.class).toString()));
    } else {
      // WARNING: order is critical--specific paths *after* `any(Path)`; in addition, since mocking further
      // an already-mocked instance, `.doReturn/.when` is needed (vs. `.when/.thenReturn`)
      Mockito.when(fs.getFileStatus(any(Path.class))).thenThrow(new FileNotFoundException());
      for (Map.Entry<Path, FileStatus> entry : this.optPathsWithFileStatuses.get().entrySet()) {
        Path p = entry.getKey();
        FileStatus fileStatus = entry.getValue();
        Mockito.doReturn(fileStatus != null ? fileStatus : createEmptyFileStatus(p.toString())).when(fs).getFileStatus(p);
      }
    }
    return fs;
  }
  /** @return a default `FileStatus` bearing only `pathString` as its path */
  protected static FileStatus createEmptyFileStatus(String pathString) throws IOException {
    Path path = new Path(pathString);
    FileStatus fileStatus = new FileStatus();
    fileStatus.setPath(path);
    return fileStatus;
  }
}
/** Factory for {@link IcebergTable} mocks, described declaratively via per-snapshot path sets */
private static class MockIcebergTable {
  /** Paths comprising one snapshot: optional metadata file, manifest list, and manifest files (w/ their data files) */
  @Data
  public static class SnapshotPaths {
    private final Optional<String> metadataPath;
    private final String manifestListPath;
    private final List<IcebergSnapshotInfo.ManifestFileInfo> manifestFiles;
    public IcebergSnapshotInfo asSnapshotInfo() {
      return asSnapshotInfo(0L);
    }
    /** @param snapshotIdIndex used both as snapshot ID and as snapshot (epoch) timestamp */
    public IcebergSnapshotInfo asSnapshotInfo(long snapshotIdIndex) {
      return asSnapshotInfo(snapshotIdIndex, Instant.ofEpochMilli(snapshotIdIndex));
    }
    public IcebergSnapshotInfo asSnapshotInfo(Long snapshotId, Instant timestamp) {
      return new IcebergSnapshotInfo(snapshotId, timestamp, this.metadataPath, this.manifestListPath, this.manifestFiles);
    }
  }
  /** @return a mock whose "current" snapshot is the last of `snapshotPathSets` (snapshot ID == list index) */
  public static IcebergTable withSnapshots(List<SnapshotPaths> snapshotPathSets) throws IOException {
    IcebergTable table = Mockito.mock(IcebergTable.class);
    int lastIndex = snapshotPathSets.size() - 1;
    Mockito.when(table.getCurrentSnapshotInfoOverviewOnly())
        .thenReturn(snapshotPathSets.get(lastIndex).asSnapshotInfo(lastIndex));
    // ADMISSION: this is strictly more analogous to `IcebergTable.getAllSnapshotInfosIterator()`, as it doesn't
    // filter only the delta... nonetheless, it should work fine for the tests herein
    Mockito.when(table.getIncrementalSnapshotInfosIterator()).thenReturn(
        IndexingStreams.transformWithIndex(snapshotPathSets.stream(),
            (pathSet, i) -> pathSet.asSnapshotInfo(i)).iterator());
    return table;
  }
}
/** Helpers for pairing stream elements with their ordinal position */
public static class IndexingStreams {
  /** @return {@link Stream} equivalent of `inputs.zipWithIndex.map(f)` in scala */
  public static <T, R> Stream<R> transformWithIndex(Stream<T> inputs, BiFunction<T, Integer, R> f) {
    // given sketchy import, sequester for now within enclosing test class, rather than adding to `gobblin-utility`
    return org.apache.iceberg.relocated.com.google.common.collect.Streams.zip(
        inputs, IntStream.iterate(0, i -> i + 1).boxed(), f);
  }
}
/** Picks fields of interest out of a {@link CopyEntity}'s serialized (gson) json representation */
private static class CopyEntityDeserializer {
  /** Owner, group, and u/g/o permission-action names for one file, as extracted from entity json */
  @Data
  public static class FileOwnerAndPermissions {
    String owner;
    String group;
    // assigning default values
    String userActionPermission = FsAction.valueOf("READ_WRITE").toString();
    String groupActionPermission = FsAction.valueOf("READ_WRITE").toString();
    String otherActionPermission = FsAction.valueOf("READ_WRITE").toString();
  }
  /** @return the origin file's path string, dug out of the nested `origin.path.uri` structure */
  public static String getFilePathAsStringFromJson(String json) {
    String filepath = new Gson().fromJson(json, JsonObject.class)
        .getAsJsonObject("object-data")
        .getAsJsonObject("origin")
        .getAsJsonObject("object-data").getAsJsonObject("path").getAsJsonObject("object-data")
        .getAsJsonObject("uri").getAsJsonPrimitive("object-data").getAsString();
    return filepath;
  }
  /** @return owner/permissions for each ancestor dir, in the order serialized under `ancestorsOwnerAndPermission` */
  public static List<FileOwnerAndPermissions> getAncestorOwnerAndPermissions(String json) {
    JsonArray ancestorsOwnerAndPermissions = new Gson().fromJson(json, JsonObject.class)
        .getAsJsonObject("object-data")
        .getAsJsonArray("ancestorsOwnerAndPermission");
    List<FileOwnerAndPermissions> fileOwnerAndPermissionsList = Lists.newArrayList();
    for (JsonElement jsonElement : ancestorsOwnerAndPermissions) {
      fileOwnerAndPermissionsList.add(getFileOwnerAndPermissions(jsonElement.getAsJsonObject()));
    }
    return fileOwnerAndPermissionsList;
  }
  /** @return owner/permissions of the destination file itself */
  public static FileOwnerAndPermissions getDestinationOwnerAndPermissions(String json) {
    JsonObject destinationOwnerAndPermissionsJsonObject = new Gson().fromJson(json, JsonObject.class)
        .getAsJsonObject("object-data")
        .getAsJsonObject("destinationOwnerAndPermission");
    FileOwnerAndPermissions fileOwnerAndPermissions = getFileOwnerAndPermissions(destinationOwnerAndPermissionsJsonObject);
    return fileOwnerAndPermissions;
  }
  /** Parse one owner-and-permission json object; absent fields keep the class's defaults (or empty string for owner/group) */
  private static FileOwnerAndPermissions getFileOwnerAndPermissions(JsonObject jsonObject) {
    FileOwnerAndPermissions fileOwnerAndPermissions = new FileOwnerAndPermissions();
    JsonObject objData = jsonObject.getAsJsonObject("object-data");
    fileOwnerAndPermissions.owner = objData.has("owner") ? objData.getAsJsonPrimitive("owner").getAsString() : Strings.EMPTY;
    fileOwnerAndPermissions.group = objData.has("group") ? objData.getAsJsonPrimitive("group").getAsString() : Strings.EMPTY;
    JsonObject fsPermission = objData.has("fsPermission") ? objData.getAsJsonObject("fsPermission") : null;
    if (fsPermission != null) {
      JsonObject objectData = fsPermission.getAsJsonObject("object-data");
      fileOwnerAndPermissions.userActionPermission =
          objectData.getAsJsonObject("useraction").getAsJsonPrimitive("object-data").getAsString();
      fileOwnerAndPermissions.groupActionPermission =
          objectData.getAsJsonObject("groupaction").getAsJsonPrimitive("object-data").getAsString();
      fileOwnerAndPermissions.otherActionPermission =
          objectData.getAsJsonObject("otheraction").getAsJsonPrimitive("object-data").getAsString();
    }
    return fileOwnerAndPermissions;
  }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.iceberg;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hive.HiveMetastoreTest;
import org.apache.iceberg.shaded.org.apache.avro.SchemaBuilder;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/** Test {@link org.apache.gobblin.data.management.copy.iceberg.IcebergTable} */
public class IcebergTableTest extends HiveMetastoreTest {
// minimal single-column ("id": long) avro schema from which the iceberg test table's schema derives
protected static final org.apache.iceberg.shaded.org.apache.avro.Schema avroDataSchema =
    SchemaBuilder.record("test")
        .fields()
        .name("id")
        .type()
        .longType()
        .noDefault()
        .endRecord();
protected static final Schema icebergSchema = AvroSchemaUtil.toIceberg(avroDataSchema);
// identity-partitioned on "id" (declared for completeness; table creation below uses only the schema)
protected static final PartitionSpec icebergPartitionSpec = PartitionSpec.builderFor(icebergSchema)
    .identity("id")
    .build();
private final String dbName = "myicebergdb";
private final String tableName = "justtesting";
// per-test state, (re-)initialized in `setUpEachTest`
private TableIdentifier tableId;
private Table table;
private String catalogUri;
private String metadataBasePath;
@BeforeClass
public void setUp() throws Exception {
  startMetastore();
  // the namespace (db) must exist before per-test tables may be created within it
  catalog.createNamespace(Namespace.of(dbName));
}
@BeforeMethod
public void setUpEachTest() {
  // fresh table per test, so snapshots from one test never bleed into another
  tableId = TableIdentifier.of(dbName, tableName);
  table = catalog.createTable(tableId, icebergSchema);
  catalogUri = catalog.getConf().get(CatalogProperties.URI);
  metadataBasePath = calcMetadataBasePath(tableId);
}
@AfterMethod
public void cleanUpEachTest() {
  // drop the per-test table (counterpart to `setUpEachTest`)
  catalog.dropTable(tableId);
}
/** Verify info about the current snapshot only */
@Test
public void testGetCurrentSnapshotInfo() throws IOException {
  List<List<String>> perSnapshotFilesets = Lists.newArrayList(
      Lists.newArrayList("/path/to/data-a0.orc"),
      Lists.newArrayList("/path/to/data-b0.orc", "/path/to/data-b1.orc"),
      Lists.newArrayList("/path/to/data-c0.orc", "/path/to/data-c1.orc", "/path/to/data-c2.orc"),
      Lists.newArrayList("/path/to/data-d0.orc")
  );
  initializeSnapshots(table, perSnapshotFilesets);
  IcebergTable icebergTable = new IcebergTable(tableId, catalog.newTableOps(tableId), catalogUri);
  // the current snapshot is cumulative: it should span all four filesets
  IcebergSnapshotInfo currentSnapshotInfo = icebergTable.getCurrentSnapshotInfo();
  verifySnapshotInfo(currentSnapshotInfo, perSnapshotFilesets, perSnapshotFilesets.size());
}
/** Verify failure when attempting to get current snapshot info for non-existent table */
@Test(expectedExceptions = IcebergTable.TableNotFoundException.class)
public void testGetCurrentSnapshotInfoOnBogusTable() throws IOException {
  TableIdentifier bogusTableId = TableIdentifier.of(dbName, tableName + "_BOGUS");
  // result intentionally discarded (previously bound to an unused local):
  // the call itself is expected to throw `TableNotFoundException`
  new IcebergTable(bogusTableId, catalog.newTableOps(bogusTableId), catalogUri).getCurrentSnapshotInfo();
  Assert.fail("expected an exception when using table ID '" + bogusTableId + "'");
}
/** Verify info about all (full) snapshots */
@Test
public void testGetAllSnapshotInfosIterator() throws IOException {
  List<List<String>> perSnapshotFilesets = Lists.newArrayList(
      Lists.newArrayList("/path/to/data-a0.orc"),
      Lists.newArrayList("/path/to/data-b0.orc", "/path/to/data-b1.orc"),
      Lists.newArrayList("/path/to/data-c0.orc", "/path/to/data-c1.orc", "/path/to/data-c2.orc"),
      Lists.newArrayList("/path/to/data-d0.orc")
  );
  initializeSnapshots(table, perSnapshotFilesets);
  List<IcebergSnapshotInfo> snapshotInfos = Lists.newArrayList(new IcebergTable(tableId, catalog.newTableOps(tableId), catalogUri).getAllSnapshotInfosIterator());
  Assert.assertEquals(snapshotInfos.size(), perSnapshotFilesets.size(), "num snapshots");
  for (int i = 0; i < snapshotInfos.size(); ++i) {
    System.err.println("verifying snapshotInfo[" + i + "]");
    // "full" snapshots are cumulative: snapshot i spans filesets [0..i]
    verifySnapshotInfo(snapshotInfos.get(i), perSnapshotFilesets.subList(0, i + 1), snapshotInfos.size());
  }
}
/** Verify info about all snapshots (incremental deltas) */
@Test
public void testGetIncrementalSnapshotInfosIterator() throws IOException {
  List<List<String>> perSnapshotFilesets = Lists.newArrayList(
      Lists.newArrayList("/path/to/data-a0.orc"),
      Lists.newArrayList("/path/to/data-b0.orc", "/path/to/data-b1.orc"),
      Lists.newArrayList("/path/to/data-c0.orc", "/path/to/data-c1.orc", "/path/to/data-c2.orc"),
      Lists.newArrayList("/path/to/data-d0.orc")
  );
  initializeSnapshots(table, perSnapshotFilesets);
  List<IcebergSnapshotInfo> snapshotInfos = Lists.newArrayList(new IcebergTable(tableId, catalog.newTableOps(tableId), catalogUri).getIncrementalSnapshotInfosIterator());
  Assert.assertEquals(snapshotInfos.size(), perSnapshotFilesets.size(), "num snapshots");
  for (int i = 0; i < snapshotInfos.size(); ++i) {
    System.err.println("verifying snapshotInfo[" + i + "]");
    // incremental snapshots are deltas: snapshot i spans only fileset [i]
    verifySnapshotInfo(snapshotInfos.get(i), perSnapshotFilesets.subList(i, i + 1), snapshotInfos.size());
  }
}
/** Verify info about all snapshots (incremental deltas) correctly eliminates repeated data files */
@Test
public void testGetIncrementalSnapshotInfosIteratorRepeatedFiles() throws IOException {
  // filesets deliberately repeat earlier files (e.g. data-a0 reappears in snapshots 1 and 2);
  // filename letter encodes which ordinal fileset each file "belongs" to
  List<List<String>> perSnapshotFilesets = Lists.newArrayList(
      Lists.newArrayList("/path/to/data-a0.orc"),
      Lists.newArrayList("/path/to/data-b0.orc", "/path/to/data-b1.orc", "/path/to/data-a0.orc"),
      Lists.newArrayList("/path/to/data-a0.orc","/path/to/data-c0.orc", "/path/to/data-b1.orc", "/path/to/data-c1.orc", "/path/to/data-c2.orc"),
      Lists.newArrayList("/path/to/data-d0.orc")
  );
  initializeSnapshots(table, perSnapshotFilesets);
  List<IcebergSnapshotInfo> snapshotInfos = Lists.newArrayList(new IcebergTable(tableId, catalog.newTableOps(tableId), catalogUri).getIncrementalSnapshotInfosIterator());
  Assert.assertEquals(snapshotInfos.size(), perSnapshotFilesets.size(), "num snapshots");
  for (int i = 0; i < snapshotInfos.size(); ++i) {
    System.err.println("verifying snapshotInfo[" + i + "] - " + snapshotInfos.get(i));
    char initialChar = (char) ((int) 'a' + i);
    // adjust expectations to eliminate duplicate entries (i.e. those bearing letter not aligned with ordinal fileset)
    List<String> fileset = perSnapshotFilesets.get(i).stream().filter(name -> {
      String uniquePortion = name.substring("/path/to/data-".length());
      return uniquePortion.startsWith(Character.toString(initialChar));
    }).collect(Collectors.toList());
    verifySnapshotInfo(snapshotInfos.get(i), Arrays.asList(fileset), snapshotInfos.size());
  }
}
/**
 * full validation for a particular {@link IcebergSnapshotInfo}
 * @param perSnapshotFilesets the data filesets this snapshot is expected to cover
 * @param overallNumSnapshots total snapshots on the table (the metadata file's sequence number)
 */
protected void verifySnapshotInfo(IcebergSnapshotInfo snapshotInfo, List<List<String>> perSnapshotFilesets, int overallNumSnapshots) {
  // verify metadata file
  snapshotInfo.getMetadataPath().ifPresent(metadataPath -> {
        Optional<File> optMetadataFile = extractSomeMetadataFilepath(metadataPath, metadataBasePath, IcebergTableTest::doesResembleMetadataFilename);
        Assert.assertTrue(optMetadataFile.isPresent(), "has metadata filepath");
        verifyMetadataFile(optMetadataFile.get(), Optional.of(overallNumSnapshots));
      }
  );
  // verify manifest list file
  Optional<File> optManifestListFile = extractSomeMetadataFilepath(snapshotInfo.getManifestListPath(), metadataBasePath, IcebergTableTest::doesResembleManifestListFilename);
  Assert.assertTrue(optManifestListFile.isPresent(), "has manifest list filepath");
  verifyManifestListFile(optManifestListFile.get(), Optional.of(snapshotInfo.getSnapshotId()));
  // verify manifest files and their listed data files
  List<IcebergSnapshotInfo.ManifestFileInfo> manifestFileInfos = snapshotInfo.getManifestFiles();
  verifyManifestFiles(manifestFileInfos, snapshotInfo.getManifestFilePaths(), perSnapshotFilesets);
  verifyAnyOrder(snapshotInfo.getAllDataFilePaths(), flatten(perSnapshotFilesets), "data filepaths");
  // verify all aforementioned paths collectively equal `getAllPaths()`
  List<String> allPathsExpected = Lists.newArrayList(snapshotInfo.getManifestListPath());
  snapshotInfo.getMetadataPath().ifPresent(allPathsExpected::add);
  allPathsExpected.addAll(snapshotInfo.getManifestFilePaths());
  allPathsExpected.addAll(snapshotInfo.getAllDataFilePaths());
  verifyAnyOrder(snapshotInfo.getAllPaths(), allPathsExpected, "all paths, metadata and data");
}
/** Convenience overload: derive the db and table names from {@code tableId}. */
protected String calcMetadataBasePath(TableIdentifier tableId) {
  String dbName = tableId.namespace().toString();
  String tableName = tableId.name();
  return calcMetadataBasePath(dbName, tableName);
}
/**
 * Resolve the expected on-disk metadata dir for a table: {@code <db-path>/<table>/metadata},
 * where {@code <db-path>} comes from the test metastore.
 */
protected String calcMetadataBasePath(String theDbName, String theTableName) {
  String dbPath = metastore.getDatabasePath(theDbName);
  String basePath = dbPath + "/" + theTableName + "/metadata";
  // echoed for test-debugging convenience
  System.err.println("calculated metadata base path: '" + basePath + "'");
  return basePath;
}
/** Add one snapshot per sub-list of `perSnapshotFilesets`, in order, with the sub-list contents as the data files */
protected static void initializeSnapshots(Table table, List<List<String>> perSnapshotFilesets) {
  for (List<String> filesForThisSnapshot : perSnapshotFilesets) {
    AppendFiles pendingAppend = table.newAppend();
    for (String dataFilePath : filesForThisSnapshot) {
      pendingAppend.appendFile(createDataFile(dataFilePath, 0, 1));
    }
    pendingAppend.commit(); // each commit produces exactly one snapshot
  }
}
/** Extract whatever kind of iceberg metadata file, iff recognized by `doesResemble` */
protected static Optional<File> extractSomeMetadataFilepath(String candidatePath, String basePath, Predicate<String> doesResemble) {
  try {
    File candidateFile = new File(new URI(candidatePath).getPath());
    // regardless of kind, every metadata file must live directly under the metadata base dir
    Assert.assertEquals(candidateFile.getParent(), basePath, "metadata base dirpath");
    // empty unless the filename matches the caller's expected naming scheme
    return Optional.of(candidateFile).filter(f -> doesResemble.test(f.getName()));
  } catch (URISyntaxException e) {
    throw new RuntimeException(e); // should not happen!
  }
}
/**
 * Assert `file` is named like a table metadata file; when given, also check that the filename's
 * leading `-`-delimited token equals the expected snapshot sequence number.
 */
protected void verifyMetadataFile(File file, Optional<Integer> optSnapshotSeqNum) {
  Assert.assertTrue(doesResembleMetadataFilename(file.getName()), "metadata filename resemblance");
  optSnapshotSeqNum.ifPresent(expectedSeqNum ->
      Assert.assertEquals(Integer.valueOf(file.getName().split("-")[0]), expectedSeqNum,
          "snapshot sequence num"));
}
/**
 * Assert `file` is named like a manifest list file; when given, also check that the filename's
 * second `-`-delimited token equals the expected snapshot id.
 */
protected void verifyManifestListFile(File file, Optional<Long> optSnapshotId) {
  Assert.assertTrue(doesResembleManifestListFilename(file.getName()), "manifest list filename resemblance");
  optSnapshotId.ifPresent(expectedId ->
      Assert.assertEquals(Long.valueOf(file.getName().split("-")[1]), expectedId, "snapshot id"));
}
/**
 * Asserts a 1:1:1 correspondence between `manifestFileInfos`, `manifestFilePaths`, and
 * `perSnapshotFilesets`, then verifies each manifest's name and listed data files.
 * NOTE(review): each manifest is paired with the REVERSE-indexed fileset
 * (`numManifests - i - 1`) — evidently manifests arrive newest-first while filesets are in
 * creation order; confirm against {@link IcebergSnapshotInfo#getManifestFiles()}.
 */
protected void verifyManifestFiles(List<IcebergSnapshotInfo.ManifestFileInfo> manifestFileInfos,
List<String> manifestFilePaths,
List<List<String>> perSnapshotFilesets) {
Assert.assertEquals(manifestFileInfos.size(), manifestFilePaths.size());
Assert.assertEquals(manifestFileInfos.size(), perSnapshotFilesets.size());
int numManifests = manifestFileInfos.size();
for (int i = 0; i < numManifests; ++i) {
IcebergSnapshotInfo.ManifestFileInfo mfi = manifestFileInfos.get(i);
Assert.assertTrue(doesResembleManifestFilename(mfi.getManifestFilePath()), "manifest filename resemblance");
Assert.assertEquals(mfi.getManifestFilePath(), manifestFilePaths.get(i));
// reversed index: the i-th manifest corresponds to the (numManifests - 1 - i)-th fileset
verifyAnyOrder(mfi.getListedFilePaths(), perSnapshotFilesets.get(numManifests - i - 1),
"manifest contents of '" + mfi.getManifestFilePath() + "'");
}
}
/** @return whether `name` follows the `*.metadata.json` naming scheme of table metadata files */
protected static boolean doesResembleMetadataFilename(String name) {
  boolean hasMetadataJsonSuffix = name.endsWith(".metadata.json");
  return hasMetadataJsonSuffix;
}
/** @return whether `name` follows the `snap-*.avro` naming scheme of manifest list files */
protected static boolean doesResembleManifestListFilename(String name) {
  return name.endsWith(".avro") && name.startsWith("snap-");
}
/** @return whether `name` looks like a manifest file: an `.avro` WITHOUT the `snap-` prefix that marks a manifest list */
protected static boolean doesResembleManifestFilename(String name) {
  return name.endsWith(".avro") && !name.startsWith("snap-");
}
/** doesn't actually create a physical file (on disk), merely a {@link org.apache.iceberg.DataFile} */
// sizeBytes/numRecords are metadata-only here, since no physical file ever backs `path`
protected static DataFile createDataFile(String path, long sizeBytes, long numRecords) {
return DataFiles.builder(icebergPartitionSpec)
.withPath(path)
.withFileSizeInBytes(sizeBytes)
.withRecordCount(numRecords)
.build();
}
/** general utility: order-independent/set equality between collections */
// NOTE: hashing into sets also discards duplicates, so element multiplicity is NOT compared
protected static <T> void verifyAnyOrder(Collection<T> actual, Collection<T> expected, String message) {
Assert.assertEquals(Sets.newHashSet(actual), Sets.newHashSet(expected), message);
}
/** general utility: flatten a collection of collections into a single-level {@link List} */
protected static <T, C extends Collection<T>> List<T> flatten(Collection<C> cc) {
  return cc.stream().flatMap(Collection::stream).collect(Collectors.toList());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import com.google.common.collect.Iterables;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
import org.apache.gobblin.data.management.copy.TestCopyableDataset;
import org.apache.gobblin.data.management.copy.converter.UnGzipConverter;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.TestUtils;
/**
 * Tests {@link TarArchiveInputStreamDataWriter}: un-gzips a test archive, writes it through the
 * writer, and verifies the un-tarred file lands at the expected output path with the expected
 * contents.
 */
public class TarArchiveInputStreamDataWriterTest {

  private FileSystem fs;
  private Path testTempPath;

  @BeforeClass
  public void setup() throws Exception {
    fs = FileSystem.getLocal(new Configuration());
    testTempPath =
        new Path(Files.createTempDir().getAbsolutePath(), "tarArchiveInputStreamDataWriterTest");
    fs.mkdirs(testTempPath);
  }

  @DataProvider(name = "testFileDataProvider")
  public static Object[][] fileDataProvider() {
    // {filePath, newFileName, expectedText}
    return new Object[][] { { "tarArchiveInputStreamDataWriterTest/archived.tar.gz", "archived.tar.gz", "text" },
        { "tarArchiveInputStreamDataWriterTest/archived.tgz", "archived_new_name", "text" } };
  }

  /**
   * Writes the archive at {@code filePath} (a classpath resource) through
   * {@link TarArchiveInputStreamDataWriter} and verifies the un-archived {@code text.txt}
   * contains {@code expectedText}.
   *
   * @param filePath classpath location of the gzipped tar archive under test
   * @param newFileName destination name for the copied archive
   * @param expectedText contents expected inside the archived text.txt
   */
  @Test(dataProvider = "testFileDataProvider")
  public void testWrite(final String filePath, final String newFileName, final String expectedText) throws Exception {
    String fileNameInArchive = "text.txt";

    WorkUnitState state = TestUtils.createTestWorkUnitState();
    state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
    state.setProp(ConfigurationKeys.WRITER_FILE_PATH, "writer_file_path_" + RandomStringUtils.randomAlphabetic(5));
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
    CopySource.serializeCopyableDataset(state, metadata);

    FileAwareInputStream fileAwareInputStream = getCompressedInputStream(filePath, newFileName);
    CopySource.serializeCopyEntity(state, fileAwareInputStream.getFile());

    TarArchiveInputStreamDataWriter dataWriter = new TarArchiveInputStreamDataWriter(state, 1, 0);
    dataWriter.write(fileAwareInputStream);
    dataWriter.commit();

    // the archive file contains file test.txt
    Path unArchivedFilePath = new Path(fileAwareInputStream.getFile().getDestination(), fileNameInArchive);
    // Path at which the writer writes text.txt
    Path taskOutputFilePath =
        new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
            fileAwareInputStream.getFile().getDatasetAndPartition(metadata).identifier()),
            PathUtils.withoutLeadingSeparator(unArchivedFilePath));

    // bug fix: previously compared against a hard-coded local ("text"), silently ignoring the
    // data provider's expectedText argument; also close the stream to avoid leaking a handle
    try (FileInputStream actualContentStream = new FileInputStream(taskOutputFilePath.toString())) {
      Assert.assertEquals(IOUtils.toString(actualContentStream).trim(), expectedText);
    }
  }

  /**
   * Find the test compressed file <code><filePath/code> in classpath and read it as a {@link FileAwareInputStream}
   */
  private FileAwareInputStream getCompressedInputStream(final String filePath, final String newFileName) throws Exception {
    UnGzipConverter converter = new UnGzipConverter();
    FileSystem fs = FileSystem.getLocal(new Configuration());
    String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
    FileStatus status = fs.getFileStatus(testTempPath);
    OwnerAndPermission ownerAndPermission =
        new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL,
            FsAction.ALL));
    CopyableFile cf =
        CopyableFileUtils.getTestCopyableFile(filePath, new Path(testTempPath, newFileName).toString(), newFileName, 0L,
            ownerAndPermission);

    FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
        .inputStream(fs.open(new Path(fullPath))).build();
    Iterable<FileAwareInputStream> iterable =
        converter.convertRecord("outputSchema", fileAwareInputStream, new WorkUnitState());
    return Iterables.getFirst(iterable, null);
  }

  @AfterClass
  public void cleanup() {
    try {
      fs.delete(testTempPath, true);
    } catch (IOException e) {
      // best-effort temp-dir removal; a failure here should not fail the suite
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.writer;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.AccessDeniedException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.GPGFileDecryptor;
import org.apache.gobblin.crypto.GPGFileEncryptor;
import org.apache.gobblin.crypto.GPGFileEncryptorTest;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.CopyableDatasetMetadata;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.CopyableFileUtils;
import org.apache.gobblin.data.management.copy.FileAwareInputStream;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import org.apache.gobblin.data.management.copy.TestCopyableDataset;
import org.apache.gobblin.data.management.copy.splitter.DistcpFileSplitter;
import org.apache.gobblin.util.TestUtils;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.io.StreamUtils;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
public class FileAwareInputStreamDataWriterTest {
TestLocalFileSystem fs;
Path testTempPath;
@BeforeClass
public void setup() throws Exception {
  // a local FS rooted at a fresh temp dir keeps each run isolated from prior test artifacts
  fs = new TestLocalFileSystem();
  fs.initialize(URI.create("file:///"), new Configuration());
  File tempDir = Files.createTempDir();
  testTempPath = new Path(tempDir.getAbsolutePath(), "InputStreamDataWriterTest");
  fs.mkdirs(testTempPath);
}
/**
 * End-to-end write-and-commit through {@link FileAwareInputStreamDataWriter}, covering both the
 * default writer-managed staging dir and a user-defined static staging dir.
 * NOTE(review): the FileInputStreams opened for verification are never closed — leaks a file
 * handle per run; consider try-with-resources.
 */
@Test
public void testWrite() throws Exception {
String streamString1 = "testContents1";
String streamString2 = "testContents2";
String userDefStagingDir = System.getProperty("user.dir") + "/user_staging_dir";
FileStatus status = fs.getFileStatus(testTempPath);
OwnerAndPermission ownerAndPermission =
new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
CopyableFile cf = CopyableFileUtils.getTestCopyableFile((long) streamString1.length(), ownerAndPermission);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
// case 1: user-defined staging disabled -> staging dir derives from WRITER_STAGING_DIR
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.USER_DEFINED_STAGING_DIR_FLAG,false);
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(StreamUtils.convertStream(IOUtils.toInputStream(streamString1))).build();
// staging dir must NOT be the user-defined one when the flag is off
Assert.assertNotEquals(dataWriter.stagingDir,userDefStagingDir);
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString())), streamString1);
// case 2: user-defined static staging directory enabled
WorkUnitState state2 = TestUtils.createTestWorkUnitState();
state2.setProp(ConfigurationKeys.USER_DEFINED_STAGING_DIR_FLAG,true);
state2.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state2.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output2").toString());
state2.setProp(ConfigurationKeys.USER_DEFINED_STATIC_STAGING_DIR,userDefStagingDir);
state2.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
CopySource.serializeCopyEntity(state2, cf);
CopySource.serializeCopyableDataset(state2, metadata);
dataWriter = new FileAwareInputStreamDataWriter(state2, 1, 0);
fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(StreamUtils.convertStream(IOUtils.toInputStream(streamString2))).build();
// with the flag on, the writer must adopt the user-supplied staging dir verbatim
Assert.assertEquals(dataWriter.stagingDir.toUri().toString(),userDefStagingDir);
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
writtenFilePath = new Path(new Path(state2.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString())), streamString2);
}
@Test
public void testBlockWrite() throws Exception {
String streamString = "testContents";
FileStatus status = fs.getFileStatus(testTempPath);
OwnerAndPermission ownerAndPermission =
new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
state.setProp(DistcpFileSplitter.SPLIT_ENABLED, true);
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
long splitLen = 4;
int splits = (int) (streamString.length() / splitLen + 1);
DistcpFileSplitter.Split split = new DistcpFileSplitter.Split(0, splitLen, 0, splits,
String.format("%s.__PART%d__", cf.getDestination().getName(), 0));
FSDataInputStream dataInputStream = StreamUtils.convertStream(IOUtils.toInputStream(streamString));
dataInputStream.seek(split.getLowPosition());
FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(dataInputStream)
.split(Optional.of(split))
.build();
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString())),
streamString.substring(0, (int) splitLen));
}
@Test
public void testWriteWithEncryption() throws Exception {
byte[] streamString = "testEncryptedContents".getBytes("UTF-8");
byte[] expectedContents = new byte[streamString.length];
for (int i = 0; i < streamString.length; i++) {
expectedContents[i] = (byte)((streamString[i] + 1) % 256);
}
FileStatus status = fs.getFileStatus(testTempPath);
OwnerAndPermission ownerAndPermission =
new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
CopyableFile cf = CopyableFileUtils.getTestCopyableFile((long) streamString.length, ownerAndPermission);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "insecure_shift");
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertTrue(writtenFilePath.getName().endsWith("insecure_shift"),
"Expected encryption name to be appended to destination");
Assert.assertEquals(IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString())), expectedContents);
}
@Test
public void testWriteWithGPGSymmetricEncryption() throws Exception {
byte[] streamString = "testEncryptedContents".getBytes("UTF-8");
FileStatus status = fs.getFileStatus(testTempPath);
OwnerAndPermission ownerAndPermission =
new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
CopyableFile cf = CopyableFileUtils.getTestCopyableFile((long) streamString.length, ownerAndPermission);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "gpg");
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY, "testPassword");
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertTrue(writtenFilePath.getName().endsWith("gpg"),
"Expected encryption name to be appended to destination");
byte[] encryptedContent = IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString()));
byte[] decryptedContent = new byte[streamString.length];
IOUtils.readFully(GPGFileDecryptor.decryptFile(new FileInputStream(writtenFilePath.toString()), "testPassword"),
decryptedContent);
// encrypted string should not be the same as the plaintext
Assert.assertNotEquals(encryptedContent, streamString);
// decrypted string should be the same as the plaintext
Assert.assertEquals(decryptedContent, streamString);
}
@Test
public void testWriteWithGPGAsymmetricEncryption() throws Exception {
byte[] streamString = "testEncryptedContents".getBytes("UTF-8");
FileStatus status = fs.getFileStatus(testTempPath);
OwnerAndPermission ownerAndPermission =
new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
CopyableFile cf = CopyableFileUtils.getTestCopyableFile((long) streamString.length, ownerAndPermission);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "gpg");
File publicKeyFile = new File(testTempPath.toString(), "public.key");
FileUtils.copyInputStreamToFile(GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PUBLIC_KEY),
publicKeyFile);
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, publicKeyFile.getAbsolutePath());
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY,
GPGFileEncryptorTest.PASSPHRASE);
state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEY_NAME,
GPGFileEncryptorTest.KEY_ID);
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
.inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
dataWriter.write(fileAwareInputStream);
dataWriter.commit();
Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
Assert.assertTrue(writtenFilePath.getName().endsWith("gpg"),
"Expected encryption name to be appended to destination");
byte[] encryptedContent = IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString()));
byte[] decryptedContent = new byte[streamString.length];
IOUtils.readFully(GPGFileDecryptor.decryptFile(new FileInputStream(writtenFilePath.toString()),
GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PRIVATE_KEY),
GPGFileEncryptorTest.PASSPHRASE), decryptedContent);
// encrypted string should not be the same as the plaintext
Assert.assertNotEquals(encryptedContent, streamString);
// decrypted string should be the same as the plaintext
Assert.assertEquals(decryptedContent, streamString);
}
@Test
public void testCommit() throws IOException {
String destinationExistingToken = "destination";
String destinationAdditionalTokens = "path";
String fileName = "file";
// Asemble destination paths
Path destinationWithoutLeadingSeparator = new Path(new Path(destinationExistingToken, destinationAdditionalTokens), fileName);
Path destination = new Path("/", destinationWithoutLeadingSeparator);
// Create temp directory
File tmpFile = Files.createTempDir();
tmpFile.deleteOnExit();
Path tmpPath = new Path(tmpFile.getAbsolutePath());
// create origin file
Path originFile = new Path(tmpPath, fileName);
this.fs.createNewFile(originFile);
// create stating dir
Path stagingDir = new Path(tmpPath, "staging");
this.fs.mkdirs(stagingDir);
// create output dir
Path outputDir = new Path(tmpPath, "output");
this.fs.mkdirs(outputDir);
// create copyable file
FileStatus status = this.fs.getFileStatus(originFile);
FsPermission readWrite = new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
FsPermission dirReadWrite = new FsPermission(FsAction.ALL, FsAction.READ_WRITE, FsAction.READ_WRITE);
OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(), readWrite);
List<OwnerAndPermission> ancestorOwnerAndPermissions = Lists.newArrayList();
ancestorOwnerAndPermissions.add(ownerAndPermission);
ancestorOwnerAndPermissions.add(ownerAndPermission);
ancestorOwnerAndPermissions.add(ownerAndPermission);
ancestorOwnerAndPermissions.add(ownerAndPermission);
Properties properties = new Properties();
properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
CopyableFile cf = CopyableFile.fromOriginAndDestination(this.fs, status, destination,
CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).publishDir(new Path("/target"))
.preserve(PreserveAttributes.fromMnemonicString("")).build())
.destinationOwnerAndPermission(ownerAndPermission)
.ancestorsOwnerAndPermission(ancestorOwnerAndPermissions)
.build();
// create work unit state
WorkUnitState state = createWorkUnitState(stagingDir, outputDir, cf);
CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
CopySource.serializeCopyEntity(state, cf);
CopySource.serializeCopyableDataset(state, metadata);
// create writer
FileAwareInputStreamDataWriter writer = new FileAwareInputStreamDataWriter(state, 1, 0);
// create output of writer.write
Path writtenFile = writer.getStagingFilePath(cf);
this.fs.mkdirs(writtenFile.getParent());
this.fs.createNewFile(writtenFile);
// create existing directories in writer output
Path outputRoot = FileAwareInputStreamDataWriter.getPartitionOutputRoot(outputDir, cf.getDatasetAndPartition(metadata));
Path existingOutputPath = new Path(outputRoot, destinationExistingToken);
this.fs.mkdirs(existingOutputPath);
FileStatus fileStatus = this.fs.getFileStatus(existingOutputPath);
FsPermission existingPathPermission = fileStatus.getPermission();
// check initial state of the relevant directories
Assert.assertTrue(this.fs.exists(existingOutputPath));
Assert.assertEquals(this.fs.listStatus(existingOutputPath).length, 0);
writer.actualProcessedCopyableFile = Optional.of(cf);
// commit
writer.commit();
// check state of relevant paths after commit
Path expectedOutputPath = new Path(outputRoot, destinationWithoutLeadingSeparator);
Assert.assertTrue(this.fs.exists(expectedOutputPath));
fileStatus = this.fs.getFileStatus(expectedOutputPath);
Assert.assertEquals(fileStatus.getOwner(), ownerAndPermission.getOwner());
Assert.assertEquals(fileStatus.getGroup(), ownerAndPermission.getGroup());
Assert.assertEquals(fileStatus.getPermission(), readWrite);
// parent should have permissions set correctly
fileStatus = this.fs.getFileStatus(expectedOutputPath.getParent());
Assert.assertEquals(fileStatus.getPermission(), dirReadWrite);
// previously existing paths should not have permissions changed
fileStatus = this.fs.getFileStatus(existingOutputPath);
Assert.assertEquals(fileStatus.getPermission(), existingPathPermission);
Assert.assertFalse(this.fs.exists(writer.stagingDir));
}
  /**
   * Verifies that {@code commit()} creates the destination's absent ancestor directories under the
   * writer output root and applies the configured ACL entries to each newly created ancestor.
   */
  @Test
  public void testCommitWithAclPreservationWhenAncestorPathsAbsent() throws IOException {
    String fileName = "file";
    // Assemble destination paths: /destination/path/file
    String destinationExistingToken = "destination";
    String destinationAdditionalTokens = "path";
    Path destinationWithoutLeadingSeparator = new Path(new Path(destinationExistingToken, destinationAdditionalTokens), fileName);
    Path destination = new Path("/", destinationWithoutLeadingSeparator);
    // Create temp directory
    File tmpFile = Files.createTempDir();
    tmpFile.deleteOnExit();
    Path tmpPath = new Path(tmpFile.getAbsolutePath());
    // create origin file
    Path originFile = new Path(tmpPath, fileName);
    this.fs.createNewFile(originFile);
    // create staging dir
    Path stagingDir = new Path(tmpPath, "staging");
    this.fs.mkdirs(stagingDir);
    // create output dir
    Path outputDir = new Path(tmpPath, "output");
    this.fs.mkdirs(outputDir);
    // create copyable file ("a" mnemonic presumably selects ACL preservation -- see PreserveAttributes)
    CopyableFile cf = createCopyableFile(originFile, destination, "a");
    // create work unit state
    WorkUnitState state = createWorkUnitState(stagingDir, outputDir, cf);
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
    CopySource.serializeCopyEntity(state, cf);
    CopySource.serializeCopyableDataset(state, metadata);
    // create writer
    FileAwareInputStreamDataWriter writer = new FileAwareInputStreamDataWriter(state, fs,1, 0, null);
    // simulate the output of writer.write by creating the file at its staging location
    Path writtenFile = writer.getStagingFilePath(cf);
    this.fs.mkdirs(writtenFile.getParent());
    this.fs.createNewFile(writtenFile);
    Path outputRoot = FileAwareInputStreamDataWriter.getPartitionOutputRoot(outputDir, cf.getDatasetAndPartition(metadata));
    Path expectedOutputPath = new Path(outputRoot, destinationWithoutLeadingSeparator);
    // check ancestor path is absent before commit
    Assert.assertFalse(this.fs.exists(expectedOutputPath.getParent().getParent()));
    writer.actualProcessedCopyableFile = Optional.of(cf);
    // commit
    writer.commit();
    // check ancestor path was created as part of commit
    Assert.assertTrue(this.fs.exists(expectedOutputPath.getParent().getParent()));
    verifyAclEntries(writer, this.fs.getPathToAclEntries(), expectedOutputPath, outputDir);
  }
  /**
   * Verifies that {@code commit()} applies ACL entries only to the ancestor directories it
   * creates, walking up no further than the destination directory that already existed.
   */
  @Test
  public void testCommitWithAclPreservationWhenAncestorPathsPresent() throws IOException {
    String fileName = "file";
    // Assemble destination paths: /destination/path/file
    String destinationExistingToken = "destination";
    String destinationAdditionalTokens = "path";
    Path destinationWithoutLeadingSeparator = new Path(new Path(destinationExistingToken, destinationAdditionalTokens), fileName);
    Path destination = new Path("/", destinationWithoutLeadingSeparator);
    // Create temp directory
    File tmpFile = Files.createTempDir();
    tmpFile.deleteOnExit();
    Path tmpPath = new Path(tmpFile.getAbsolutePath());
    // create origin file
    Path originFile = new Path(tmpPath, fileName);
    this.fs.createNewFile(originFile);
    // create staging dir
    Path stagingDir = new Path(tmpPath, "staging");
    this.fs.mkdirs(stagingDir);
    // create output dir
    Path outputDir = new Path(tmpPath, "output");
    this.fs.mkdirs(outputDir);
    // create copyable file ("a" mnemonic presumably selects ACL preservation -- see PreserveAttributes)
    CopyableFile cf = createCopyableFile(originFile, destination, "a");
    // create work unit state
    WorkUnitState state = createWorkUnitState(stagingDir, outputDir, cf);
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
    CopySource.serializeCopyEntity(state, cf);
    CopySource.serializeCopyableDataset(state, metadata);
    // create writer
    FileAwareInputStreamDataWriter writer = new FileAwareInputStreamDataWriter(state, fs,1, 0, null);
    // simulate the output of writer.write by creating the file at its staging location
    Path writtenFile = writer.getStagingFilePath(cf);
    this.fs.mkdirs(writtenFile.getParent());
    this.fs.createNewFile(writtenFile);
    // create existing directories in writer output
    Path outputRoot = FileAwareInputStreamDataWriter.getPartitionOutputRoot(outputDir, cf.getDatasetAndPartition(metadata));
    Path existingOutputPath = new Path(outputRoot, destinationExistingToken);
    Path expectedOutputPath = new Path(outputRoot, destinationWithoutLeadingSeparator);
    // create output path and check ancestor path is present
    this.fs.mkdirs(existingOutputPath);
    Assert.assertTrue(this.fs.exists(expectedOutputPath.getParent().getParent()));
    writer.actualProcessedCopyableFile = Optional.of(cf);
    // commit
    writer.commit();
    // only directories below the pre-existing path should have received ACL entries
    verifyAclEntries(writer, this.fs.getPathToAclEntries(), expectedOutputPath, existingOutputPath);
  }
private void verifyAclEntries(FileAwareInputStreamDataWriter writer, ImmutableMap pathToAclEntries, Path expectedOutputPath, Path ancestorRoot) {
// fetching and preparing file paths from FileAwareInputStreamDataWriter object
Path outputDir = writer.outputDir;
String[] splitExpectedOutputPath = expectedOutputPath.toString().split("output");
Path dstOutputPath = new Path(outputDir.toString().concat(splitExpectedOutputPath[1])).getParent();
OwnerAndPermission destinationOwnerAndPermission = writer.actualProcessedCopyableFile.get().getDestinationOwnerAndPermission();
List<AclEntry> actual = destinationOwnerAndPermission.getAclEntries();
while (!dstOutputPath.equals(ancestorRoot)) {
List<AclEntry> expected = (List<AclEntry>) pathToAclEntries.get(dstOutputPath);
Assert.assertEquals(actual, expected);
dstOutputPath = dstOutputPath.getParent();
}
}
private WorkUnitState createWorkUnitState(Path stagingDir, Path outputDir, CopyableFile cf) {
WorkUnitState state = TestUtils.createTestWorkUnitState();
state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir.toUri().getPath());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir.toUri().getPath());
state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
return state;
}
private CopyableFile createCopyableFile(Path originFile, Path destination, String preserveAttrs) throws IOException {
FileStatus status = this.fs.getFileStatus(originFile);
FsPermission readWrite = new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
AclEntry aclEntry = new AclEntry.Builder()
.setPermission(FsAction.READ_WRITE)
.setName("test-acl")
.setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.GROUP)
.build();
List<AclEntry> aclEntryList = Lists.newArrayList();
aclEntryList.add(aclEntry);
OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(), readWrite, aclEntryList);
List<OwnerAndPermission> ancestorOwnerAndPermissions = Lists.newArrayList();
ancestorOwnerAndPermissions.add(ownerAndPermission);
ancestorOwnerAndPermissions.add(ownerAndPermission);
ancestorOwnerAndPermissions.add(ownerAndPermission);
Properties properties = new Properties();
properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
CopyableFile cf = CopyableFile.fromOriginAndDestination(this.fs, status, destination,
CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).publishDir(new Path("/target"))
.preserve(PreserveAttributes.fromMnemonicString(preserveAttrs)).build())
.destinationOwnerAndPermission(ownerAndPermission)
.ancestorsOwnerAndPermission(ancestorOwnerAndPermissions)
.build();
return cf;
}
@Test
public void testAddExecutePermission() {
String[] setPermissions = {"000", "100", "200", "400", "600", "700", "211", "250"};
String[] expectPermissions = {"100", "100", "300", "500", "700", "700", "311", "350"};
String[] stickyBit = {"" ,"1"};
for (String bit : stickyBit) {
for (int index = 0; index < setPermissions.length; ++index) {
Assert.assertEquals(FileAwareInputStreamDataWriter.addExecutePermissionToOwner(new FsPermission(bit + setPermissions[index])),
new FsPermission(bit + expectPermissions[index]));
}
}
}
  /**
   * Exercises retry behavior of {@code write()}: while the filesystem's {@code mkdirs} is stubbed
   * to throw {@link AccessDeniedException} every write attempt fails; once the stub is restored,
   * the same writer instance succeeds and the committed file contains the expected contents.
   */
  @Test
  public void testRetryingAfterFailure() throws Exception {
    String streamString1 = "testContents1";
    FileStatus status = fs.getFileStatus(testTempPath);
    OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL), Lists.newArrayList());
    CopyableFile cf = CopyableFileUtils.getTestCopyableFile((long) streamString1.length(), ownerAndPermission);
    CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
    WorkUnitState state = TestUtils.createTestWorkUnitState();
    state.setProp(ConfigurationKeys.USER_DEFINED_STAGING_DIR_FLAG, false);
    state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
    state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
    state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
    CopySource.serializeCopyEntity(state, cf);
    CopySource.serializeCopyableDataset(state, metadata);
    // Spy on a real local filesystem so individual methods can be stubbed and later restored.
    FileSystem fileSystem = spy(FileSystem.get(URI.create("file:///"), WriterUtils.getFsConfiguration(state)));
    FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, fileSystem, 1, 0, null);
    FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder()
        .file(cf)
        .inputStream(StreamUtils.convertStream(IOUtils.toInputStream(streamString1, StandardCharsets.UTF_8)))
        .build();
    // Force every mkdirs call to fail so each write attempt throws.
    doThrow(new AccessDeniedException("Test")).when(fileSystem).mkdirs(any());
    for (int i = 1; i <= 3; i++) {
      try {
        dataWriter.write(fileAwareInputStream);
        Assert.fail("Expected method to throw AccessDeniedException on call #" + i);
      } catch (AccessDeniedException e) {
        // expected exception
      }
    }
    // Restore the real mkdirs; the same writer must now succeed without being recreated.
    doCallRealMethod().when(fileSystem).mkdirs(any());
    dataWriter.write(fileAwareInputStream);
    dataWriter.commit();
    Path writtenFilePath = new Path(
        new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR), cf.getDatasetAndPartition(metadata).identifier()),
        cf.getDestination());
    // The committed file must contain exactly the bytes that were written.
    Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString()), StandardCharsets.UTF_8),
        streamString1);
  }
@AfterClass
public void cleanup() {
try {
fs.delete(testTempPath, true);
} catch (IOException e) {
// ignore
}
}
/**
* Created this class to support `setAcl` method for {@link LocalFileSystem} for unit testing since LocalFileSystem
* doesn't provide any implementation for `setAcl` method
*/
protected class TestLocalFileSystem extends LocalFileSystem {
private final ConcurrentHashMap<Path, List<AclEntry>> pathToAclEntries = new ConcurrentHashMap<>();
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclEntries) {
pathToAclEntries.put(path, aclEntries);
}
public ImmutableMap<Path, List<AclEntry>> getPathToAclEntries() {
return ImmutableMap.copyOf(pathToAclEntries);
}
}
} | 2,360 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive/HiveDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.google.common.base.Optional;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.retention.version.HiveDatasetVersionCleaner;
public class HiveDatasetTest {
private static String DUMMY_CONFIG_KEY_WITH_DB_TOKEN = "dummyConfig.withDB";
private static String DUMMY_CONFIG_KEY_WITH_STRIP_SUFFIX = "dummyConfig" + ConfigUtils.STRIP_SUFFIX;
private static String DUMMY_CONFIG_KEY_WITH_TABLE_TOKEN = "dummyConfig.withTable";
private static Config config = ConfigFactory.parseMap(
ImmutableMap.<String, String>builder().put(DUMMY_CONFIG_KEY_WITH_STRIP_SUFFIX, "testRoot")
.put(HiveDatasetVersionCleaner.REPLACEMENT_HIVE_DB_NAME_KEY, "resPrefix_$LOGICAL_DB_resPostfix")
.put(HiveDatasetVersionCleaner.REPLACEMENT_HIVE_TABLE_NAME_KEY, "resPrefix_$LOGICAL_TABLE_resPostfix")
.put(DUMMY_CONFIG_KEY_WITH_DB_TOKEN, "resPrefix_$DB_resPostfix")
.put(DUMMY_CONFIG_KEY_WITH_TABLE_TOKEN, "resPrefix_$TABLE_resPostfix")
.build());
@Test
public void testParseLogicalDbAndTable() throws IOException {
String datasetNamePattern;
HiveDatasetFinder.DbAndTable logicalDbAndTable;
// Happy Path
datasetNamePattern = "dbPrefix_$LOGICAL_DB_dbPostfix.tablePrefix_$LOGICAL_TABLE_tablePostfix";
logicalDbAndTable = HiveDataset.parseLogicalDbAndTable(datasetNamePattern,
new HiveDatasetFinder.DbAndTable("dbPrefix_myDB_dbPostfix", "tablePrefix_myTable_tablePostfix"),
HiveDataset.LOGICAL_DB_TOKEN, HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.assertEquals(logicalDbAndTable.getDb(), "myDB", "DB name not parsed correctly");
Assert.assertEquals(logicalDbAndTable.getTable(), "myTable", "Table name not parsed correctly");
// Happy Path - without prefix in DB and Table names
datasetNamePattern = "$LOGICAL_DB_dbPostfix.$LOGICAL_TABLE_tablePostfix";
logicalDbAndTable = HiveDataset.parseLogicalDbAndTable(datasetNamePattern,
new HiveDatasetFinder.DbAndTable("myDB_dbPostfix", "myTable_tablePostfix"), HiveDataset.LOGICAL_DB_TOKEN,
HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.assertEquals(logicalDbAndTable.getDb(), "myDB", "DB name not parsed correctly");
Assert.assertEquals(logicalDbAndTable.getTable(), "myTable", "Table name not parsed correctly");
// Happy Path - without postfix in DB and Table names
datasetNamePattern = "dbPrefix_$LOGICAL_DB.tablePrefix_$LOGICAL_TABLE";
logicalDbAndTable = HiveDataset.parseLogicalDbAndTable(datasetNamePattern,
new HiveDatasetFinder.DbAndTable("dbPrefix_myDB", "tablePrefix_myTable"), HiveDataset.LOGICAL_DB_TOKEN,
HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.assertEquals(logicalDbAndTable.getDb(), "myDB", "DB name not parsed correctly");
Assert.assertEquals(logicalDbAndTable.getTable(), "myTable", "Table name not parsed correctly");
// Happy Path - without any prefix and postfix in DB and Table names
datasetNamePattern = "$LOGICAL_DB.$LOGICAL_TABLE";
logicalDbAndTable =
HiveDataset.parseLogicalDbAndTable(datasetNamePattern, new HiveDatasetFinder.DbAndTable("myDB", "myTable"),
HiveDataset.LOGICAL_DB_TOKEN, HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.assertEquals(logicalDbAndTable.getDb(), "myDB", "DB name not parsed correctly");
Assert.assertEquals(logicalDbAndTable.getTable(), "myTable", "Table name not parsed correctly");
// Dataset name pattern missing
datasetNamePattern = "";
try {
logicalDbAndTable = HiveDataset.parseLogicalDbAndTable(datasetNamePattern,
new HiveDatasetFinder.DbAndTable("dbPrefix_myDB_dbPostfix", "tablePrefix_myTable_tablePostfix"),
HiveDataset.LOGICAL_DB_TOKEN, HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.fail("Dataset name pattern is missing, code should have thrown exception");
} catch (IllegalArgumentException e) {
// Ignore exception, it was expected
}
// Malformed Dataset name pattern
datasetNamePattern = "dbPrefix_$LOGICAL_DB_dbPostfixtablePrefix_$LOGICAL_TABLE_tablePostfix";
try {
logicalDbAndTable = HiveDataset.parseLogicalDbAndTable(datasetNamePattern,
new HiveDatasetFinder.DbAndTable("dbPrefix_myDB_dbPostfix", "tablePrefix_myTable_tablePostfix"),
HiveDataset.LOGICAL_DB_TOKEN, HiveDataset.LOGICAL_TABLE_TOKEN);
Assert.fail("Dataset name pattern is missing, code should have thrown exception");
} catch (IllegalArgumentException e) {
// Ignore exception, it was expected
}
}
@Test
public void testExtractTokenValueFromEntity() throws IOException {
String tokenValue;
// Happy Path
tokenValue = HiveDataset.extractTokenValueFromEntity("dbPrefix_myDB_dbPostfix", "dbPrefix_$LOGICAL_DB_dbPostfix",
HiveDataset.LOGICAL_DB_TOKEN);
Assert.assertEquals(tokenValue, "myDB", "DB name not extracted correctly");
// Happy Path - without prefix
tokenValue = HiveDataset.extractTokenValueFromEntity("myDB_dbPostfix", "$LOGICAL_DB_dbPostfix",
HiveDataset.LOGICAL_DB_TOKEN);
Assert.assertEquals(tokenValue, "myDB", "DB name not extracted correctly");
// Happy Path - without postfix
tokenValue = HiveDataset.extractTokenValueFromEntity("dbPrefix_myDB", "dbPrefix_$LOGICAL_DB", HiveDataset.LOGICAL_DB_TOKEN);
Assert.assertEquals(tokenValue, "myDB", "DB name not extracted correctly");
// Missing token in template
try {
tokenValue =
HiveDataset.extractTokenValueFromEntity("dbPrefix_myDB_dbPostfix", "dbPrefix_$LOGICAL_TABLE_dbPostfix",
HiveDataset.LOGICAL_DB_TOKEN);
Assert.fail("Token is missing in template, code should have thrown exception");
} catch (IllegalArgumentException e) {
// Ignore exception, it was expected
}
// Missing source entity
try {
tokenValue = HiveDataset.extractTokenValueFromEntity("", "dbPrefix_$LOGICAL_DB_dbPostfix", HiveDataset.LOGICAL_DB_TOKEN);
Assert.fail("Source entity is missing, code should have thrown exception");
} catch (IllegalArgumentException e) {
// Ignore exception, it was expected
}
// Missing template
try {
tokenValue = HiveDataset.extractTokenValueFromEntity("dbPrefix_myDB_dbPostfix", "", HiveDataset.LOGICAL_DB_TOKEN);
Assert.fail("Template is missing, code should have thrown exception");
} catch (IllegalArgumentException e) {
// Ignore exception, it was expected
}
}
@Test
public void testResolveConfig() throws IOException {
HiveDatasetFinder.DbAndTable realDbAndTable = new HiveDatasetFinder.DbAndTable("realDb", "realTable");
HiveDatasetFinder.DbAndTable logicalDbAndTable = new HiveDatasetFinder.DbAndTable("logicalDb", "logicalTable");
Config resolvedConfig = HiveDataset.resolveConfig(config, realDbAndTable, logicalDbAndTable);
Assert.assertEquals(resolvedConfig.getString(DUMMY_CONFIG_KEY_WITH_DB_TOKEN), "resPrefix_realDb_resPostfix",
"Real DB not resolved correctly");
Assert.assertEquals(resolvedConfig.getString(DUMMY_CONFIG_KEY_WITH_TABLE_TOKEN), "resPrefix_realTable_resPostfix",
"Real Table not resolved correctly");
Assert.assertEquals(resolvedConfig.getString(HiveDatasetVersionCleaner.REPLACEMENT_HIVE_DB_NAME_KEY),
"resPrefix_logicalDb_resPostfix", "Logical DB not resolved correctly");
Assert.assertEquals(resolvedConfig.getString(HiveDatasetVersionCleaner.REPLACEMENT_HIVE_TABLE_NAME_KEY),
"resPrefix_logicalTable_resPostfix", "Logical Table not resolved correctly");
Assert.assertEquals(resolvedConfig.getString(DUMMY_CONFIG_KEY_WITH_STRIP_SUFFIX), "testRoot");
}
@Test(expectedExceptions = RuntimeException.class)
public void testThrowsErrorIfCopyEntityHelperFails() throws Exception {
Properties copyProperties = new Properties();
Properties hiveProperties = new Properties();
copyProperties.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target");
// Invoke an IOException by passing in a class that does not exist to HiveCopyEntityHelper constructor
hiveProperties.put(HiveCopyEntityHelper.COPY_PARTITION_FILTER_GENERATOR, "missingClass");
Table table = new Table(Table.getEmptyTable("testDB", "testTable"));
HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
HiveDataset passingDataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
// Even though IOException is thrown HiveDataset should silence it due to not having the configuration flag
try {
CopyConfiguration copyConfigWithoutAbortKey = CopyConfiguration.builder(new LocalFileSystem(), copyProperties).build();
passingDataset.getFileSetIterator(FileSystem.getLocal(new Configuration()), copyConfigWithoutAbortKey);
} catch (Exception e) {
Assert.fail("IOException should log and fail silently since it is not configured");
}
// Exception should propagate to a RuntimeException since flag is enabled
copyProperties.put(CopyConfiguration.ABORT_ON_SINGLE_DATASET_FAILURE, "true");
HiveDataset failingDataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
CopyConfiguration copyConfiguration = CopyConfiguration.builder(new LocalFileSystem(), copyProperties).build();
failingDataset.getFileSetIterator(FileSystem.getLocal(new Configuration()), copyConfiguration);
}
@Test(expectedExceptions = RuntimeException.class)
public void testThrowsErrorIfTableNotCopyable() throws Exception {
Properties copyProperties = new Properties();
Properties hiveProperties = new Properties();
copyProperties.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target");
Table table = new Table(Table.getEmptyTable("testDB", "testTable"));
// Virtual view tables are not copyable
table.setTableType(TableType.VIRTUAL_VIEW);
HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
HiveDataset passingDataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
// Since flag is not enabled the dataset should log an error and continue
try {
CopyConfiguration copyConfigWithoutAbortKey = CopyConfiguration.builder(new LocalFileSystem(), copyProperties).build();
passingDataset.getFileSetIterator(FileSystem.getLocal(new Configuration()), copyConfigWithoutAbortKey);
} catch (Exception e) {
Assert.fail("IOException should log and fail silently since it is not configured");
}
// Exception should propagate to a RuntimeException since flag is enabled
copyProperties.put(CopyConfiguration.ABORT_ON_SINGLE_DATASET_FAILURE, "true");
HiveDataset failingDataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
CopyConfiguration copyConfiguration = CopyConfiguration.builder(new LocalFileSystem(), copyProperties).build();
failingDataset.getFileSetIterator(FileSystem.getLocal(new Configuration()), copyConfiguration);
}
} | 2,361 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive/HiveCopyEntityHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import lombok.AllArgsConstructor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper.DeregisterFileDeleteMethod;
import org.apache.gobblin.hive.HiveRegProps;
import org.apache.gobblin.metrics.event.MultiTimingEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
public class HiveCopyEntityHelperTest {
private final Path sourceRoot = new Path("/source");
private final Path targetRoot = new Path("/target");
@Test public void testResolvePath() throws Exception {
Assert.assertEquals(HiveTargetPathHelper.resolvePath("/data/$DB/$TABLE", "db", "table"), new Path("/data/db/table"));
Assert.assertEquals(HiveTargetPathHelper.resolvePath("/data/$TABLE", "db", "table"), new Path("/data/table"));
Assert.assertEquals(HiveTargetPathHelper.resolvePath("/data", "db", "table"), new Path("/data/table"));
}
  /**
   * Exercises {@code HiveCopyEntityHelper.fullPathDiff} with the source/target fixture built by
   * {@code populateSourceAndTargetEntities}: the computed diff must contain exactly the expected
   * copy and delete paths and none of the expected skips.
   */
  @Test
  public void testFullPathDiff() throws Exception {
    Map<Path, FileStatus> sourceMap = Maps.newHashMap();
    Map<Path, FileStatus> targetDesiredMap = Maps.newHashMap();
    List<Path> expectedFilesToCopy = Lists.newArrayList();
    List<Path> expectedFilesToSkipCopy = Lists.newArrayList();
    List<Path> expectedFilesToDelete = Lists.newArrayList();
    List<Path> expectedFilesToSkipDelete = Lists.newArrayList();
    populateSourceAndTargetEntities(sourceMap, targetDesiredMap, expectedFilesToCopy,
        expectedFilesToSkipCopy, expectedFilesToDelete, expectedFilesToSkipDelete);
    TestLocationDescriptor sourceLocation = new TestLocationDescriptor(sourceMap);
    TestLocationDescriptor targetDesiredLocation = new TestLocationDescriptor(targetDesiredMap);
    // Existing target starts out identical to the desired target (independent map copy).
    TestLocationDescriptor existingTargetLocation = new TestLocationDescriptor(Maps.newHashMap(targetDesiredMap));
    MultiTimingEvent timer = Mockito.mock(MultiTimingEvent.class);
    HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.when(helper.isEnforceFileSizeMatch()).thenReturn(true);
    HiveTargetPathHelper targetPathHelper = Mockito.mock(HiveTargetPathHelper.class);
    // Map any source path to its target path by swapping the source root for the target root.
    Mockito.when(targetPathHelper
        .getTargetPath(Mockito.any(Path.class), Mockito.any(FileSystem.class), Mockito.any(Optional.class),
            Mockito.anyBoolean())).then(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation)
          throws Throwable {
        Path path = (Path) invocation.getArguments()[0];
        return new Path(path.toString().replace(sourceRoot.toString(), targetRoot.toString()));
      }
    });
    Mockito.when(helper.getTargetPathHelper()).thenReturn(targetPathHelper);
    HiveCopyEntityHelper.DiffPathSet diff =
        HiveCopyEntityHelper.fullPathDiff(sourceLocation, targetDesiredLocation,
            Optional.<HiveLocationDescriptor>of(existingTargetLocation), Optional.<Partition>absent(), timer, helper);
    // Copies: exactly the expected set, and none of the expected skips.
    Assert.assertEquals(diff.filesToCopy.size(), expectedFilesToCopy.size());
    for (Path expectedFileToCopy : expectedFilesToCopy) {
      Assert.assertTrue(containsPath(diff.filesToCopy, expectedFileToCopy));
    }
    for (Path expectedFileToSkipCopy : expectedFilesToSkipCopy) {
      Assert.assertFalse(containsPath(diff.filesToCopy, expectedFileToSkipCopy));
    }
    // Deletes: exactly the expected set, and none of the expected skips.
    Assert.assertEquals(diff.pathsToDelete.size(), expectedFilesToDelete.size());
    for (Path expectedFileToDelete : expectedFilesToDelete) {
      Assert.assertTrue(diff.pathsToDelete.contains(expectedFileToDelete));
    }
    for (Path expectedFileToSkipDelete : expectedFilesToSkipDelete) {
      Assert.assertFalse(diff.pathsToDelete.contains(expectedFileToSkipDelete));
    }
  }
  /**
   * With {@code UnmanagedDataPolicy.ABORT}, {@code fullPathDiff} must fail with an IOException
   * (carrying the offending table and file list) when the target contains a file the copy does
   * not manage.
   */
  @Test
  public void testFullPathDiffWithUnmanagedPathsWithoutDeletePolicy() throws Exception {
    Map<Path, FileStatus> sourceMap = Maps.newHashMap();
    Map<Path, FileStatus> targetDesiredMap = Maps.newHashMap();
    List<Path> expectedFilesToCopy = Lists.newArrayList();
    List<Path> expectedFilesToSkipCopy = Lists.newArrayList();
    List<Path> expectedFilesToDelete = Lists.newArrayList();
    List<Path> expectedFilesToSkipDelete = Lists.newArrayList();
    populateSourceAndTargetEntities(sourceMap, targetDesiredMap, expectedFilesToCopy,
        expectedFilesToSkipCopy, expectedFilesToDelete, expectedFilesToSkipDelete);
    // add un-managed files to the target path
    Path path6 = new Path("path6");
    Path targetPath6 = new Path(targetRoot, path6);
    Map<Path, FileStatus> targetDesiredMapWithExtraFile = Maps.newHashMap(targetDesiredMap);
    targetDesiredMapWithExtraFile.put(targetPath6, getFileStatus(targetPath6, 0, 10));
    expectedFilesToDelete.add(targetPath6);
    TestLocationDescriptor sourceLocation = new TestLocationDescriptor(sourceMap);
    TestLocationDescriptor targetDesiredLocation = new TestLocationDescriptor(targetDesiredMapWithExtraFile);
    TestLocationDescriptor existingTargetLocation = new TestLocationDescriptor(Maps.newHashMap(targetDesiredMap));
    Table table = Mockito.mock(Table.class);
    HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
    MultiTimingEvent timer = Mockito.mock(MultiTimingEvent.class);
    HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.when(helper.isEnforceFileSizeMatch()).thenReturn(true);
    HiveTargetPathHelper targetPathHelper = Mockito.mock(HiveTargetPathHelper.class);
    // Table name feeds the expected exception message below.
    Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
    Mockito.when(hiveDataset.getTable()).thenReturn(table);
    Mockito.when(table.getCompleteName()).thenReturn("table1");
    // Map any source path to its target path by swapping the source root for the target root.
    Mockito.when(targetPathHelper
        .getTargetPath(Mockito.any(Path.class), Mockito.any(FileSystem.class), Mockito.any(Optional.class),
            Mockito.anyBoolean())).then(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation)
          throws Throwable {
        Path path = (Path) invocation.getArguments()[0];
        return new Path(path.toString().replace(sourceRoot.toString(), targetRoot.toString()));
      }
    });
    Mockito.when(helper.getTargetPathHelper()).thenReturn(targetPathHelper);
    // Add policy to not delete un-managed data
    Mockito.when(helper.getUnmanagedDataPolicy()).thenReturn(HiveCopyEntityHelper.UnmanagedDataPolicy.ABORT);
    // We should receive an exception that un-managed files are detected
    try {
      HiveCopyEntityHelper.DiffPathSet diff =
          HiveCopyEntityHelper.fullPathDiff(sourceLocation, targetDesiredLocation,
              Optional.<HiveLocationDescriptor>of(existingTargetLocation), Optional.<Partition>absent(), timer, helper);
      Assert.fail("Expected an IOException but did not receive any");
    } catch (IOException ex) {
      // Ignore IOException if message is what we expect
      String expectedExceptionMessage = "New table / partition would pick up existing, undesired files in target file "
          + "system. table1, files [/target/path6].";
      Assert.assertEquals(ex.getMessage(), expectedExceptionMessage);
    }
  }
  /**
   * With {@code UnmanagedDataPolicy.DELETE_UNMANAGED_DATA}, an un-managed target file must not
   * abort the diff; it instead appears in {@code pathsToDelete}.
   */
  @Test
  public void testFullPathDiffWithUnmanagedPathsWithDeletePolicy() throws Exception {
    Map<Path, FileStatus> sourceMap = Maps.newHashMap();
    Map<Path, FileStatus> targetDesiredMap = Maps.newHashMap();
    List<Path> expectedFilesToCopy = Lists.newArrayList();
    List<Path> expectedFilesToSkipCopy = Lists.newArrayList();
    List<Path> expectedFilesToDelete = Lists.newArrayList();
    List<Path> expectedFilesToSkipDelete = Lists.newArrayList();
    populateSourceAndTargetEntities(sourceMap, targetDesiredMap, expectedFilesToCopy,
        expectedFilesToSkipCopy, expectedFilesToDelete, expectedFilesToSkipDelete);
    // add un-managed files to the target path
    Path path6 = new Path("path6");
    Path targetPath6 = new Path(targetRoot, path6);
    Map<Path, FileStatus> targetDesiredMapWithExtraFile = Maps.newHashMap(targetDesiredMap);
    targetDesiredMapWithExtraFile.put(targetPath6, getFileStatus(targetPath6, 0, 10));
    expectedFilesToDelete.add(targetPath6);
    TestLocationDescriptor sourceLocation = new TestLocationDescriptor(sourceMap);
    TestLocationDescriptor targetDesiredLocation = new TestLocationDescriptor(targetDesiredMapWithExtraFile);
    TestLocationDescriptor existingTargetLocation = new TestLocationDescriptor(Maps.newHashMap(targetDesiredMap));
    Table table = Mockito.mock(Table.class);
    HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
    MultiTimingEvent timer = Mockito.mock(MultiTimingEvent.class);
    HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.when(helper.isEnforceFileSizeMatch()).thenReturn(true);
    HiveTargetPathHelper targetPathHelper = Mockito.mock(HiveTargetPathHelper.class);
    Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
    Mockito.when(hiveDataset.getTable()).thenReturn(table);
    Mockito.when(table.getCompleteName()).thenReturn("table1");
    // Map any source path to its target path by swapping the source root for the target root.
    Mockito.when(targetPathHelper
        .getTargetPath(Mockito.any(Path.class), Mockito.any(FileSystem.class), Mockito.any(Optional.class),
            Mockito.anyBoolean())).then(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation)
          throws Throwable {
        Path path = (Path) invocation.getArguments()[0];
        return new Path(path.toString().replace(sourceRoot.toString(), targetRoot.toString()));
      }
    });
    Mockito.when(helper.getTargetPathHelper()).thenReturn(targetPathHelper);
    // Add policy to delete un-managed data
    Mockito.when(helper.getUnmanagedDataPolicy()).thenReturn(HiveCopyEntityHelper.UnmanagedDataPolicy.DELETE_UNMANAGED_DATA);
    // Since policy is specified to delete un-managed data, this should not throw exception and un-managed file should
    // .. show up in pathsToDelete in the diff
    HiveCopyEntityHelper.DiffPathSet diff =
        HiveCopyEntityHelper.fullPathDiff(sourceLocation, targetDesiredLocation,
            Optional.<HiveLocationDescriptor>of(existingTargetLocation), Optional.<Partition>absent(), timer, helper);
    // Copies: exactly the expected set, and none of the expected skips.
    Assert.assertEquals(diff.filesToCopy.size(), expectedFilesToCopy.size());
    for (Path expectedFileToCopy : expectedFilesToCopy) {
      Assert.assertTrue(containsPath(diff.filesToCopy, expectedFileToCopy));
    }
    for (Path expectedFileToSkipCopy : expectedFilesToSkipCopy) {
      Assert.assertFalse(containsPath(diff.filesToCopy, expectedFileToSkipCopy));
    }
    // Deletes: include the un-managed file, and none of the expected skips.
    Assert.assertEquals(diff.pathsToDelete.size(), expectedFilesToDelete.size());
    for (Path expectedFileToDelete : expectedFilesToDelete) {
      Assert.assertTrue(diff.pathsToDelete.contains(expectedFileToDelete));
    }
    for (Path expectedFileToSkipDelete : expectedFilesToSkipDelete) {
      Assert.assertFalse(diff.pathsToDelete.contains(expectedFileToSkipDelete));
    }
  }
/**
 * Populates matching source / desired-target file listings covering every diff case handled by
 * {@code HiveCopyEntityHelper.fullPathDiff}, and records which paths the caller should expect
 * to be copied, skipped, or deleted.
 *
 * Cases produced (one file per case; see {@code getFileStatus(path, len, modtime)}):
 *  - path1: present on both sides, same length, target modtime newer -> skip copy, skip delete
 *  - path2: present only on source -> copy
 *  - path3: present on both sides with different lengths -> re-copy and delete stale target
 *  - path4: source modtime newer than target -> re-copy and delete stale target
 *  - path5: present only on target -> delete
 *
 * @param sourceMap out-param: source path -> FileStatus
 * @param targetDesiredMap out-param: desired target path -> FileStatus
 * @param expectedFilesToCopy out-param: source paths the diff must copy
 * @param expectedFilesToSkipCopy out-param: source paths the diff must NOT copy
 * @param expectedFilesToDelete out-param: target paths the diff must delete
 * @param expectedFilesToSkipDelete out-param: target paths the diff must NOT delete
 */
private void populateSourceAndTargetEntities(Map<Path, FileStatus> sourceMap, Map<Path, FileStatus> targetDesiredMap,
    List<Path> expectedFilesToCopy, List<Path> expectedFilesToSkipCopy,
    List<Path> expectedFilesToDelete, List<Path> expectedFilesToSkipDelete) {
  List<FileStatus> sourceFileStatuses = Lists.newArrayList();
  List<FileStatus> desiredTargetStatuses = Lists.newArrayList();
  // already exists in target (same length, target is newer) -> no action expected
  Path path1 = new Path("path1");
  Path sourcePath1 = new Path(sourceRoot, path1);
  Path targetPath1 = new Path(targetRoot, path1);
  sourceFileStatuses.add(getFileStatus(sourcePath1, 0, 0));
  desiredTargetStatuses.add(getFileStatus(targetPath1, 0, 10));
  expectedFilesToSkipCopy.add(sourcePath1);
  expectedFilesToSkipDelete.add(targetPath1);
  // not exists in target -> expect copy
  Path path2 = new Path("path2");
  Path sourcePath2 = new Path(sourceRoot, path2);
  Path targetPath2 = new Path(targetRoot, path2);
  sourceFileStatuses.add(getFileStatus(sourcePath2, 0, 0));
  expectedFilesToCopy.add(sourcePath2);
  expectedFilesToSkipDelete.add(targetPath2);
  // exists in target, different length -> expect re-copy plus delete of the stale copy
  Path path3 = new Path("path3");
  Path sourcePath3 = new Path(sourceRoot, path3);
  Path targetPath3 = new Path(targetRoot, path3);
  sourceFileStatuses.add(getFileStatus(sourcePath3, 0, 0));
  desiredTargetStatuses.add(getFileStatus(targetPath3, 10, 0));
  expectedFilesToCopy.add(sourcePath3);
  expectedFilesToDelete.add(targetPath3);
  // exists in target, source has newer modtime -> expect re-copy plus delete of the stale copy
  Path path4 = new Path("path4");
  Path sourcePath4 = new Path(sourceRoot, path4);
  Path targetPath4 = new Path(targetRoot, path4);
  sourceFileStatuses.add(getFileStatus(sourcePath4, 0, 10));
  desiredTargetStatuses.add(getFileStatus(targetPath4, 0, 0));
  expectedFilesToCopy.add(sourcePath4);
  expectedFilesToDelete.add(targetPath4);
  // only on target -> expect delete
  Path path5 = new Path("path5");
  Path sourcePath5 = new Path(sourceRoot, path5);
  Path targetPath5 = new Path(targetRoot, path5);
  desiredTargetStatuses.add(getFileStatus(targetPath5, 0, 10));
  expectedFilesToSkipCopy.add(sourcePath5);
  expectedFilesToDelete.add(targetPath5);
  for(FileStatus status : sourceFileStatuses) {
    sourceMap.put(status.getPath(), status);
  }
  for(FileStatus status : desiredTargetStatuses) {
    targetDesiredMap.put(status.getPath(), status);
  }
}
/**
 * Verifies that {@code HiveCopyEntityHelper.addTableDeregisterSteps} appends exactly one
 * {@code PostPublishStep} that deregisters the table from the target metastore, and returns
 * the priority counter incremented by one.
 */
@Test
public void testAddTableDeregisterSteps() throws Exception {
  HiveDataset dataset = Mockito.mock(HiveDataset.class);
  Mockito.when(dataset.getProperties()).thenReturn(new Properties());
  HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
  Mockito.when(helper.getDeleteMethod()).thenReturn(DeregisterFileDeleteMethod.NO_DELETE);
  Mockito.when(helper.getTargetMetastoreURI()).thenReturn(Optional.of("/targetURI"));
  Mockito.when(helper.getHiveRegProps()).thenReturn(new HiveRegProps(new State()));
  Mockito.when(helper.getDataset()).thenReturn(dataset);
  // Only the method under test runs real code; every collaborator above is stubbed.
  Mockito.when(helper.addTableDeregisterSteps(Mockito.any(List.class), Mockito.any(String.class), Mockito.anyInt(),
      Mockito.any(org.apache.hadoop.hive.ql.metadata.Table.class))).thenCallRealMethod();
  org.apache.hadoop.hive.ql.metadata.Table metaTable = Mockito.mock(org.apache.hadoop.hive.ql.metadata.Table.class);
  org.apache.hadoop.hive.metastore.api.Table apiTable =
      Mockito.mock(org.apache.hadoop.hive.metastore.api.Table.class);
  Mockito.when(apiTable.getDbName()).thenReturn("TestDB");
  Mockito.when(apiTable.getTableName()).thenReturn("TestTable");
  Mockito.when(metaTable.getTTable()).thenReturn(apiTable);
  List<CopyEntity> copyEntities = new ArrayList<>();
  String fileSet = "testFileSet";
  int initialPriority = 0;
  int priority = helper.addTableDeregisterSteps(copyEntities, fileSet, initialPriority, metaTable);
  // assertEquals reports actual vs. expected on failure, unlike assertTrue on a boolean expression.
  Assert.assertEquals(priority, 1);
  Assert.assertEquals(copyEntities.size(), 1);
  Assert.assertTrue(copyEntities.get(0) instanceof PostPublishStep);
  PostPublishStep step = (PostPublishStep) copyEntities.get(0);
  Assert
      .assertTrue(step.getStep().toString().contains("Deregister table TestDB.TestTable on Hive metastore /targetURI"));
}
/** replacedPrefix should swap the matching leading prefix for its replacement and keep the rest. */
@Test public void testReplacedPrefix() throws Exception {
  Path source = new Path("/data/databases/DB1/Table1/SS1/part1.avro");
  Path oldPrefix = new Path("/data/databases");
  Path newPrefix = new Path("/data/databases/_parallel");
  Assert.assertEquals(HiveCopyEntityHelper.replacedPrefix(source, oldPrefix, newPrefix),
      new Path("/data/databases/_parallel/DB1/Table1/SS1/part1.avro"));
}
/**
 * addMetadataToTargetTable must rewrite an existing "path" serde parameter to the new
 * location, but must not invent one when the parameter is absent.
 */
@Test
public void testAddMetadataToTargetTable() throws Exception {
  org.apache.hadoop.hive.ql.metadata.Table targetTable =
      new Table(Table.getEmptyTable("testDB", "testTable"));
  Map<String, String> serdeParams = new HashMap<>();
  serdeParams.put("path", "randomPath");
  targetTable.getSd().getSerdeInfo().setParameters(serdeParams);
  HiveCopyEntityHelper.addMetadataToTargetTable(targetTable, new Path("newPath"), "testDB", 10L);
  Assert.assertEquals(targetTable.getSd().getSerdeInfo().getParameters().get("path"), "newPath");
  // With no pre-existing "path" entry, none should be added.
  serdeParams.clear();
  targetTable.getSd().getSerdeInfo().setParameters(serdeParams);
  HiveCopyEntityHelper.addMetadataToTargetTable(targetTable, new Path("newPath"), "testDB", 10L);
  Assert.assertFalse(targetTable.getSd().getSerdeInfo().getParameters().containsKey("path"));
}
/**
 * With no target-path configuration, getTargetLocation should return the source path unchanged.
 */
@Test
public void testGetTargetLocationDefault() throws Exception {
  Properties copyProperties = new Properties();
  copyProperties.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target");
  Path testPath = new Path("/testPath");
  Properties hiveProperties = new Properties();
  Table table = new Table(Table.getEmptyTable("testDB", "testTable"));
  table.setDataLocation(testPath);
  HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
  HiveDataset dataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProperties);
  HiveCopyEntityHelper helper = new HiveCopyEntityHelper(dataset,
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), copyProperties).build(),
      new LocalFileSystem()
  );
  FileSystem fs = new LocalFileSystem();
  // test that by default, the input path is the same as the output path
  Path path = helper.getTargetLocation(fs, testPath, Optional.<Partition>absent());
  // TestNG's assertEquals signature is (actual, expected): pass the computed path first so a
  // failure message reports the produced value as "actual" (original had the arguments swapped).
  Assert.assertEquals(path.toUri().getRawPath(), testPath.toUri().getRawPath());
}
/** USE_DATASET_LOCAL_WORK_DIR should record the table's data location as the dataset shard path. */
@Test
public void testSetsDatasetShardPath() throws Exception {
  Properties copyProps = new Properties();
  copyProps.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target");
  Properties hiveProps = new Properties();
  hiveProps.setProperty(ConfigurationKeys.USE_DATASET_LOCAL_WORK_DIR, "true");
  Table table = new Table(Table.getEmptyTable("testDB", "testTable"));
  table.setDataLocation(new Path("/testPath/db/table"));
  HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
  HiveDataset dataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProps);
  HiveCopyEntityHelper helper = new HiveCopyEntityHelper(dataset,
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), copyProps).build(),
      new LocalFileSystem());
  Assert.assertEquals(helper.getDataset().getDatasetPath(), "/testPath/db/table");
}
/**
 * Prefix replacement configuration should be applied when computing the dataset shard path:
 * /testPath/... becomes /targetPath/... .
 */
@Test
public void testSetsDatasetShardPathWithReplacement() throws Exception {
  Properties copyProps = new Properties();
  copyProps.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/target");
  Properties hiveProps = new Properties();
  hiveProps.setProperty(ConfigurationKeys.USE_DATASET_LOCAL_WORK_DIR, "true");
  hiveProps.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED, "/testPath");
  hiveProps.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_REPLACEMENT, "/targetPath");
  Table table = new Table(Table.getEmptyTable("testDB", "testTable"));
  table.setDataLocation(new Path("/testPath/db/table"));
  HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(new Properties(), Optional.absent());
  HiveDataset dataset = new HiveDataset(new LocalFileSystem(), pool, table, hiveProps);
  HiveCopyEntityHelper helper = new HiveCopyEntityHelper(dataset,
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), copyProps).build(),
      new LocalFileSystem());
  Assert.assertEquals(helper.getDataset().getDatasetPath(), "/targetPath/db/table");
}
/**
 * Two partition-compatible tables whose resolved data locations match should pass
 * checkPartitionedTableCompatibility without throwing.
 */
@Test
public void testPartitionedTableCompatibility() throws Exception {
  FieldSchema partitionSchema = new FieldSchema("part", "string", "some comment");
  // Typed list instead of the raw type used previously; setPartCols expects List<FieldSchema>.
  List<FieldSchema> partitions = new ArrayList<>();
  Path testPath = new Path("/testPath/db/table");
  Path existingTablePath = new Path("/existing/testPath/db/table");
  org.apache.hadoop.hive.ql.metadata.Table table = new org.apache.hadoop.hive.ql.metadata.Table("testDb","table1");
  table.setDataLocation(testPath);
  partitions.add(partitionSchema);
  table.setPartCols(partitions);
  org.apache.hadoop.hive.ql.metadata.Table existingTargetTable = new Table("testDb","table1");
  existingTargetTable.setDataLocation(existingTablePath);
  existingTargetTable.setPartCols(partitions);
  HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
  HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
  Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
  Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
  Mockito.when(helper.getTargetTable()).thenReturn(table);
  // Mock filesystem resolver: both locations resolve to the same path, so they are compatible.
  FileSystem mockFS = Mockito.mock(FileSystem.class);
  Mockito.when(helper.getTargetFs()).thenReturn(mockFS);
  Mockito.when(mockFS.resolvePath(Mockito.any())).thenReturn(new Path("hdfs://testPath/db/table"));
  Mockito.doCallRealMethod().when(helper).checkPartitionedTableCompatibility(table, existingTargetTable);
  helper.checkPartitionedTableCompatibility(table, existingTargetTable);
}
/**
 * If the existing target path cannot be resolved (FileNotFoundException), the compatibility
 * check should fall back to comparing the resolved parent directories, which match here.
 */
@Test
public void testPartitionedTableEqualityPathNotExist() throws Exception {
  FieldSchema partitionSchema = new FieldSchema("part", "string", "some comment");
  // Typed list instead of the raw type used previously; setPartCols expects List<FieldSchema>.
  List<FieldSchema> partitions = new ArrayList<>();
  Path testPath = new Path("/testPath/db/table");
  Path existingTablePath = new Path("/existing/testPath/db/table");
  org.apache.hadoop.hive.ql.metadata.Table table = new org.apache.hadoop.hive.ql.metadata.Table("testDb","table1");
  table.setDataLocation(testPath);
  partitions.add(partitionSchema);
  table.setPartCols(partitions);
  org.apache.hadoop.hive.ql.metadata.Table existingTargetTable = new Table("testDb","table1");
  existingTargetTable.setDataLocation(existingTablePath);
  existingTargetTable.setPartCols(partitions);
  HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
  HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
  Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
  Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
  Mockito.when(helper.getTargetTable()).thenReturn(table);
  // Mock filesystem resolver: the exact path is missing, but the parents resolve identically.
  FileSystem mockFS = Mockito.mock(FileSystem.class);
  Mockito.when(helper.getTargetFs()).thenReturn(mockFS);
  Mockito.when(mockFS.resolvePath(existingTablePath)).thenThrow(FileNotFoundException.class);
  Mockito.when(mockFS.resolvePath(existingTablePath.getParent())).thenReturn(new Path("hdfs://testPath/db/"));
  Mockito.when(mockFS.resolvePath(testPath.getParent())).thenReturn(new Path("hdfs://testPath/db/"));
  Mockito.doCallRealMethod().when(helper).checkPartitionedTableCompatibility(table, existingTargetTable);
  helper.checkPartitionedTableCompatibility(table, existingTargetTable);
}
/**
 * If the last path component of the desired location differs from the existing table's
 * location, no parent-level fallback can reconcile them and an IOException is expected.
 */
@Test
public void testTablePathInequality() throws Exception {
  FieldSchema partitionSchema = new FieldSchema("part", "string", "some comment");
  // Typed list instead of the raw type used previously; setPartCols expects List<FieldSchema>.
  List<FieldSchema> partitions = new ArrayList<>();
  Path testPath = new Path("/existing/testPath/db/newTable");
  Path existingTablePath = new Path("/existing/testPath/db/table");
  org.apache.hadoop.hive.ql.metadata.Table table = new org.apache.hadoop.hive.ql.metadata.Table("testDb","table1");
  table.setDataLocation(testPath);
  partitions.add(partitionSchema);
  table.setPartCols(partitions);
  org.apache.hadoop.hive.ql.metadata.Table existingTargetTable = new Table("testDb","table1");
  existingTargetTable.setDataLocation(existingTablePath);
  existingTargetTable.setPartCols(partitions);
  HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
  HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
  Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
  Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
  Mockito.when(helper.getTargetTable()).thenReturn(table);
  // Mock filesystem resolver: the existing table path cannot be resolved at all.
  FileSystem mockFS = Mockito.mock(FileSystem.class);
  Mockito.when(helper.getTargetFs()).thenReturn(mockFS);
  Mockito.when(mockFS.resolvePath(existingTablePath)).thenThrow(FileNotFoundException.class);
  Mockito.doCallRealMethod().when(helper).checkPartitionedTableCompatibility(table, existingTargetTable);
  Assert.assertThrows(IOException.class, () -> helper.checkPartitionedTableCompatibility(table, existingTargetTable));
}
/**
 * When the desired and existing locations are textually identical, the compatibility check
 * should pass without consulting the filesystem (the resolver stub throwing is irrelevant).
 */
@Test
public void testTablePathEqualityExactPathMatch() throws Exception {
  FieldSchema partitionSchema = new FieldSchema("part", "string", "some comment");
  // Typed list instead of the raw type used previously; setPartCols expects List<FieldSchema>.
  List<FieldSchema> partitions = new ArrayList<>();
  Path testPath = new Path("/existing/testPath/db/table");
  Path existingTablePath = new Path("/existing/testPath/db/table");
  org.apache.hadoop.hive.ql.metadata.Table table = new org.apache.hadoop.hive.ql.metadata.Table("testDb","table1");
  table.setDataLocation(testPath);
  partitions.add(partitionSchema);
  table.setPartCols(partitions);
  org.apache.hadoop.hive.ql.metadata.Table existingTargetTable = new Table("testDb","table1");
  existingTargetTable.setDataLocation(existingTablePath);
  existingTargetTable.setPartCols(partitions);
  HiveDataset hiveDataset = Mockito.mock(HiveDataset.class);
  HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
  Mockito.when(helper.getDataset()).thenReturn(hiveDataset);
  Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
  Mockito.when(helper.getTargetTable()).thenReturn(table);
  // Mock filesystem resolver
  FileSystem mockFS = Mockito.mock(FileSystem.class);
  Mockito.when(helper.getTargetFs()).thenReturn(mockFS);
  // Shouldn't matter since the strings are exact matches
  Mockito.when(mockFS.resolvePath(existingTablePath)).thenThrow(FileNotFoundException.class);
  Mockito.doCallRealMethod().when(helper).checkPartitionedTableCompatibility(table, existingTargetTable);
  helper.checkPartitionedTableCompatibility(table, existingTargetTable);
}
/** Returns true iff any status in the collection refers to the given path. */
private boolean containsPath(Collection<FileStatus> statuses, Path path) {
  return statuses.stream().anyMatch(status -> status.getPath().equals(path));
}
/**
 * Creates a FileStatus for a non-directory file at {@code path} with the given length and
 * modification time (replication and block size are zero; irrelevant for these tests).
 */
private FileStatus getFileStatus(Path path, long len, long modtime) {
  return new FileStatus(len, false, 0, 0, modtime, path);
}
/**
 * HiveLocationDescriptor stub backed by an in-memory path map, so tests can control
 * exactly which files a "location" contains without touching a real filesystem.
 */
public class TestLocationDescriptor extends HiveLocationDescriptor {
  // Paths (and their statuses) this location pretends to contain.
  Map<Path, FileStatus> paths;
  public TestLocationDescriptor(Map<Path, FileStatus> paths) {
    // Table/location arguments are irrelevant here; only the filesystem stub matters.
    super(null, null, new TestLocationFs(paths), new Properties());
    this.paths = paths;
  }
  @Override
  public Map<Path, FileStatus> getPaths()
      throws IOException {
    return this.paths;
  }
}
@AllArgsConstructor
class TestLocationFs extends LocalFileSystem {
private Map<Path, FileStatus> paths;
public FileStatus getFileStatus(Path f) throws IOException {
return paths.get(f);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
/**
 * Unit tests for {@code HiveTargetPathHelper.getTargetPath}, covering data-file relocation
 * (unpartitioned and partitioned), $DB/$TABLE token replacement, prefix replacement, explicit
 * table roots, and the default replicate-source-path behavior.
 */
public class HiveTargetPathHelperTest {
  // Source table root shared by all tests; immutable, so declared final (was mutable before).
  private static final Path TABLE_ROOT = new Path("/table/path");
  private FileSystem fs;
  @BeforeMethod
  public void setUp()
      throws Exception {
    this.fs = Mockito.mock(FileSystem.class);
    // makeQualified is stubbed as the identity so computed target paths come back unmodified.
    Mockito.when(fs.makeQualified(Mockito.any(Path.class))).thenAnswer(new Answer<Path>() {
      @Override
      public Path answer(InvocationOnMock invocation)
          throws Throwable {
        return (Path) invocation.getArguments()[0];
      }
    });
  }
  /** Relocation flattens an unpartitioned file directly under target-root/tableName. */
  @Test
  public void testRelocateFilesUnpartitioned() {
    Properties properties = new Properties();
    properties.setProperty(HiveTargetPathHelper.RELOCATE_DATA_FILES_KEY, Boolean.toString(true));
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_ROOT, "/target");
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.<Partition>absent(), true),
        new Path("/target/tableName/file1"));
  }
  /** Relocation of a partitioned file inserts the partition values into the target path. */
  @Test
  public void testRelocateFilesPartitioned() {
    Properties properties = new Properties();
    properties.setProperty(HiveTargetPathHelper.RELOCATE_DATA_FILES_KEY, Boolean.toString(true));
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_ROOT, "/target");
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Partition partition = Mockito.mock(Partition.class);
    Mockito.when(partition.getValues()).thenReturn(Lists.newArrayList("part", "123"));
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.of(partition), true),
        new Path("/target/tableName/part/123/file1"));
  }
  /** $DB and $TABLE tokens in the configured root are expanded to the dataset's db/table names. */
  @Test
  public void testTokenReplacement() {
    Properties properties = new Properties();
    properties.setProperty(HiveTargetPathHelper.RELOCATE_DATA_FILES_KEY, Boolean.toString(true));
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_ROOT, "/target/$DB/$TABLE");
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.<Partition>absent(), true),
        new Path("/target/dbName/tableName/file1"));
  }
  /** Prefix replacement swaps the configured prefix and preserves the remainder of the path. */
  @Test
  public void testReplacePrefix() {
    Properties properties = new Properties();
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED, "/table");
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_PREFIX_REPLACEMENT, "/replaced");
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.<Partition>absent(), true),
        new Path("/replaced/path/partition/file1"));
  }
  /** A bare table root re-roots the table directory under it, keeping the relative layout. */
  @Test
  public void testNewTableRoot() {
    Properties properties = new Properties();
    properties.setProperty(HiveTargetPathHelper.COPY_TARGET_TABLE_ROOT, "/target");
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.<Partition>absent(), true),
        new Path("/target/tableName/partition/file1"));
  }
  /** With no configuration at all, source paths are replicated verbatim. */
  @Test
  public void testReplicatePaths() {
    Properties properties = new Properties();
    HiveTargetPathHelper helper = createTestTargetPathHelper(properties);
    Path source = new Path(TABLE_ROOT, "partition/file1");
    Assert.assertEquals(helper.getTargetPath(source, this.fs, Optional.<Partition>absent(), true),
        new Path(TABLE_ROOT, "partition/file1"));
  }
  /** Builds a HiveTargetPathHelper over a mocked dataset named dbName.tableName rooted at TABLE_ROOT. */
  private HiveTargetPathHelper createTestTargetPathHelper(Properties properties) {
    HiveDataset dataset = Mockito.mock(HiveDataset.class);
    Table table = new Table(new org.apache.hadoop.hive.metastore.api.Table());
    table.setDbName("dbName");
    table.setTableName("tableName");
    Mockito.when(dataset.getTable()).thenReturn(table);
    Mockito.when(dataset.getTableRootPath()).thenReturn(Optional.of(TABLE_ROOT));
    Mockito.when(dataset.getProperties()).thenReturn(properties);
    // Return directly instead of going through a needless local variable.
    return new HiveTargetPathHelper(dataset);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.util.Map;
import org.testng.annotations.Test;
import org.testng.Assert;
import com.google.common.collect.Maps;
import com.typesafe.config.ConfigFactory;
/**
 * Tests for {@code WhitelistBlacklist} covering exact db.table entries, db-only entries,
 * "*" globs, comma-separated multi-entries, "|" table alternation, case sensitivity, and
 * Config-based construction.
 */
public class WhitelistBlacklistTest {
  /** A single whitelisted db.table pair; matching is case-insensitive by default. */
  @Test
  public void testSimpleWhitelist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("Dba.Tablea", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertFalse(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dbb", "tablea"));
    Assert.assertTrue(filter.acceptTable("dbA", "tablea"));
    Assert.assertTrue(filter.acceptTable("dbA", "TableA"));
  }
  /** A single blacklisted table rejects only that table; its db is still accepted. */
  @Test
  public void testSimpleBlacklist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("", "dba.tablea");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
  }
  /** Whitelisting a db with no table part accepts every table in that db. */
  @Test
  public void testDbWhitelist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dba", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dbb", "tablea"));
  }
  /** Blacklisting a db with no table part rejects every table in that db. */
  @Test
  public void testDbBlacklist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("", "dba");
    Assert.assertFalse(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptTable("dba", "tablea"));
    Assert.assertFalse(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
  }
  /** "db.*" is equivalent to whitelisting the whole db. */
  @Test
  public void testDbWhitelistStar() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dba.*", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dbb", "tablea"));
  }
  /** "db.*" is equivalent to blacklisting the whole db. */
  @Test
  public void testDbBlacklistStar() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("", "dba.*");
    Assert.assertFalse(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptTable("dba", "tablea"));
    Assert.assertFalse(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
  }
  /** Comma-separated entries combine a whole-db entry with a db.table entry. */
  @Test
  public void testMultiWhitelist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dba,dbb.tablea", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptDb("dbc"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
    Assert.assertFalse(filter.acceptTable("dbc", "tablea"));
  }
  /** "|" lists several tables for one db. */
  @Test
  public void testMultipleTablesWhitelist() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dba.tablea|tableb", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dba", "tablec"));
    Assert.assertFalse(filter.acceptTable("dbb", "tablea"));
  }
  /** Globs and exact names can be mixed in a "|" table list. */
  @Test
  public void testTablePattern() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dba.table*|accept", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dba", "accept"));
    Assert.assertFalse(filter.acceptTable("dba", "other"));
    Assert.assertFalse(filter.acceptTable("dbb", "tablea"));
  }
  /** A glob in the db part matches a prefix, not arbitrary names. */
  @Test
  public void testDbPattern() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("db*", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptDb("database"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
    Assert.assertFalse(filter.acceptTable("database", "tablea"));
  }
  /** Globs can appear in both the db and table parts simultaneously. */
  @Test
  public void testDbAndTablePattern() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("db*.table*", "");
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertTrue(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptDb("database"));
    Assert.assertTrue(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dba", "other"));
    Assert.assertTrue(filter.acceptTable("dbb", "tablea"));
    Assert.assertFalse(filter.acceptTable("database", "tablea"));
  }
  /** With ignoreCase=false, matching is exact on character case. */
  @Test
  public void testCaseAware() throws Exception {
    WhitelistBlacklist filter = new WhitelistBlacklist("dB*.Table*", "*.Tablea", false);
    Assert.assertTrue(filter.acceptDb("dBa"));
    Assert.assertTrue(filter.acceptDb("dBb"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptTable("dBa", "Tablea"));
    Assert.assertTrue(filter.acceptTable("dBa", "Tableb"));
    Assert.assertFalse(filter.acceptTable("dbb", "Tableb"));
    Assert.assertFalse(filter.acceptTable("dBb", "tableb"));
  }
  /** Construction from a Config: blacklist entries win over whitelist entries. */
  @Test
  public void testWhitelistBlacklist() throws Exception {
    Map<String, String> cfg = Maps.newHashMap();
    cfg.put("whitelist", "dba");
    cfg.put("blacklist", "dba.tablea");
    WhitelistBlacklist filter = new WhitelistBlacklist(ConfigFactory.parseMap(cfg));
    Assert.assertTrue(filter.acceptDb("dba"));
    Assert.assertFalse(filter.acceptDb("dbb"));
    Assert.assertFalse(filter.acceptTable("dba", "tablea"));
    Assert.assertTrue(filter.acceptTable("dba", "tableb"));
    Assert.assertFalse(filter.acceptTable("dbb", "tableb"));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import com.google.common.base.Optional;
import com.google.common.base.Predicates;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.junit.Assert;
import org.mockito.Mockito;
import org.testng.annotations.Test;
import static org.mockito.AdditionalAnswers.returnsFirstArg;
import java.util.List;
/**
 * Unit tests for {@link UnpartitionedTableFileSet}: verifies that copy-entity generation
 * aborts when source and existing-target table locations do not match, and short-circuits
 * cleanly when they resolve to the same path.
 *
 * NOTE(review): this file asserts through {@code org.junit.Assert} while using TestNG
 * annotations; consider migrating to {@code org.testng.Assert} for consistency.
 */
public class UnpartitionedTableFileSetTest {

  @Test(expectedExceptions = { HiveTableLocationNotMatchException.class })
  public void testHiveTableLocationNotMatchException() throws Exception {
    Path sourcePath = new Path("/testPath/db/table");
    Path existingTargetPath = new Path("/existing/testPath/db/table");

    Table sourceTable = new Table("testDb", "table1");
    sourceTable.setDataLocation(sourcePath);
    Table existingTargetTable = new Table("testDb", "table1");
    existingTargetTable.setDataLocation(existingTargetPath);

    HiveDataset dataset = Mockito.mock(HiveDataset.class);
    Mockito.when(dataset.getTable()).thenReturn(sourceTable);

    HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.when(helper.getDataset()).thenReturn(dataset);
    Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
    Mockito.when(helper.getTargetTable()).thenReturn(sourceTable);
    Mockito.when(helper.getExistingEntityPolicy()).thenReturn(HiveCopyEntityHelper.ExistingEntityPolicy.ABORT);

    // The resolver echoes back whatever path it is handed, so the source and
    // existing-target locations stay distinct and the location check must throw.
    FileSystem mockFs = Mockito.mock(FileSystem.class);
    Mockito.when(mockFs.resolvePath(Mockito.any())).then(returnsFirstArg());
    Mockito.when(helper.getTargetFs()).thenReturn(mockFs);

    MetricContext metricContext = MetricContext.builder("testUnpartitionedTableFileSet").build();
    EventSubmitter eventSubmitter = new EventSubmitter.Builder(metricContext, "loc.nomatch.exp").build();
    Mockito.when(helper.getEventSubmitter()).thenReturn(eventSubmitter);

    new UnpartitionedTableFileSet("testLocationMatch", dataset, helper).generateCopyEntities();
  }

  @Test
  public void testHiveTableLocationMatchDifferentPathsResolved() throws Exception {
    Path sourcePath = new Path("/testPath/db/table");
    Path existingTargetPath = new Path("/existing/testPath/db/table");

    Table sourceTable = new Table("testDb", "table1");
    sourceTable.setDataLocation(sourcePath);
    Table existingTargetTable = new Table("testDb", "table1");
    existingTargetTable.setDataLocation(existingTargetPath);

    HiveDataset dataset = Mockito.mock(HiveDataset.class);
    Mockito.when(dataset.getTable()).thenReturn(sourceTable);

    HiveCopyEntityHelper helper = Mockito.mock(HiveCopyEntityHelper.class);
    Mockito.when(helper.getDataset()).thenReturn(dataset);
    Mockito.when(helper.getExistingTargetTable()).thenReturn(Optional.of(existingTargetTable));
    Mockito.when(helper.getTargetTable()).thenReturn(sourceTable);
    Mockito.when(helper.getExistingEntityPolicy()).thenReturn(HiveCopyEntityHelper.ExistingEntityPolicy.ABORT);
    // Fast-table-skip always fires, so generation short-circuits with no entities
    // once the location check passes.
    Mockito.when(helper.getFastTableSkip()).thenReturn(Optional.of(Predicates.alwaysTrue()));

    // Both raw locations resolve to one canonical path, so the location check succeeds.
    FileSystem mockFs = Mockito.mock(FileSystem.class);
    Mockito.when(mockFs.resolvePath(Mockito.any())).thenReturn(new Path("hdfs://testPath/db/table"));
    Mockito.when(helper.getTargetFs()).thenReturn(mockFs);

    MetricContext metricContext = MetricContext.builder("testUnpartitionedTableFileSet").build();
    EventSubmitter eventSubmitter = new EventSubmitter.Builder(metricContext, "loc.nomatch.exp").build();
    Mockito.when(helper.getEventSubmitter()).thenReturn(eventSubmitter);

    UnpartitionedTableFileSet fileSet = new UnpartitionedTableFileSet("testLocationMatch", dataset, helper);
    List<CopyEntity> copyEntities = (List<CopyEntity>) fileSet.generateCopyEntities();
    // Size should be 0 since the fast-table-skip predicate is always true.
    Assert.assertEquals(copyEntities.size(), 0);
  }
}
| 2,365 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive/HiveDatasetFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.copy.predicates.TableTypeFilter;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.AutoReturnableObject;
/**
 * Unit tests for {@link HiveDatasetFinder} covering dataset discovery against a mocked
 * Hive metastore, table-type filtering, whitelist/blacklist handling, explicit table
 * lists, resilience to per-table failures, and scoped dataset configuration.
 */
public class HiveDatasetFinderTest {

  @Test
  public void testDatasetFinder() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table2"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table3"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    // An empty whitelist accepts every db/table in the mocked metastore.
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    // All three tables should surface as datasets.
    Assert.assertEquals(datasets.size(), 3);
  }

  @Test
  public void testTableFilter() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    // table1/table2 are unpartitioned ("snapshot") tables; table3/table4 carry a date partition key.
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table2"));
    dbAndTables.add(new DatePartitionTable("db1", "table3"));
    dbAndTables.add(new DatePartitionTable("db1", "table4"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "");
    // Test snapshot only
    properties.put(HiveDatasetFinder.TABLE_FILTER, TableTypeFilter.class.getName());
    properties.put(TableTypeFilter.FILTER_TYPE, "snapshot");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 2);
    Assert.assertTrue(datasets.stream().anyMatch(dataset -> dataset.getDbAndTable().toString().equals("db1.table1")));
    Assert.assertTrue(datasets.stream().anyMatch(dataset -> dataset.getDbAndTable().toString().equals("db1.table2")));
    // Test partitioned table only
    properties.put(TableTypeFilter.FILTER_TYPE, "partitioned");
    finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 2);
    Assert.assertTrue(datasets.stream().anyMatch(dataset -> dataset.getDbAndTable().toString().equals("db1.table3")));
    Assert.assertTrue(datasets.stream().anyMatch(dataset -> dataset.getDbAndTable().toString().equals("db1.table4")));
  }

  @Test
  public void testException() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    // TestHiveDatasetFinder throws an IOException when it encounters this table name.
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", TestHiveDatasetFinder.THROW_EXCEPTION));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table3"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    // The failing table is skipped; the other two datasets are still returned.
    Assert.assertEquals(datasets.size(), 2);
  }

  @Test
  public void testWhitelist() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table2"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db2", "table1"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    // Only db1 is whitelisted, so db2's table must not appear.
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "db1");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 2);
    Assert.assertEquals(datasets.get(0).getTable().getDbName(), "db1");
    Assert.assertEquals(datasets.get(1).getTable().getDbName(), "db1");
    Assert.assertEquals(Sets.newHashSet(datasets.get(0).getTable().getTableName(), datasets.get(1).getTable().getTableName()),
        Sets.newHashSet("table1", "table2"));
  }

  @Test
  public void testBlacklist() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table2"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db2", "table1"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    // Empty whitelist accepts everything; the blacklist then removes all of db2.
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "");
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.BLACKLIST, "db2");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 2);
    Assert.assertEquals(datasets.get(0).getTable().getDbName(), "db1");
    Assert.assertEquals(datasets.get(1).getTable().getDbName(), "db1");
    Assert.assertEquals(Sets.newHashSet(datasets.get(0).getTable().getTableName(), datasets.get(1).getTable().getTableName()),
        Sets.newHashSet("table1", "table2"));
  }

  @Test
  public void testTableList() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table2"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table3"));
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db2", "table1"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    // Explicit db + table-name pattern selection instead of whitelist/blacklist.
    properties.put(HiveDatasetFinder.DB_KEY, "db1");
    properties.put(HiveDatasetFinder.TABLE_PATTERN_KEY, "table1|table2");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 2);
    Assert.assertEquals(datasets.get(0).getTable().getDbName(), "db1");
    Assert.assertEquals(datasets.get(1).getTable().getDbName(), "db1");
    Assert.assertEquals(Sets.newHashSet(datasets.get(0).getTable().getTableName(), datasets.get(1).getTable().getTableName()),
        Sets.newHashSet("table1", "table2"));
  }

  @Test
  public void testDatasetConfig() throws Exception {
    List<HiveDatasetFinder.DbAndTable> dbAndTables = Lists.newArrayList();
    dbAndTables.add(new HiveDatasetFinder.DbAndTable("db1", "table1"));
    HiveMetastoreClientPool pool = getTestPool(dbAndTables);
    Properties properties = new Properties();
    properties.put(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST, "");
    properties.put("hive.dataset.test.conf1", "conf1-val1");
    properties.put("hive.dataset.test.conf2", "conf2-val2");
    HiveDatasetFinder finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    List<HiveDataset> datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 1);
    HiveDataset hiveDataset = datasets.get(0);
    // Without a config prefix, the dataset config keys retain their full property names.
    Assert.assertEquals(hiveDataset.getDatasetConfig().getString("hive.dataset.test.conf1"), "conf1-val1");
    Assert.assertEquals(hiveDataset.getDatasetConfig().getString("hive.dataset.test.conf2"), "conf2-val2");
    // Test scoped configs with prefix
    properties.put(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, "hive.dataset.test");
    finder = new TestHiveDatasetFinder(FileSystem.getLocal(new Configuration()), properties, pool);
    datasets = Lists.newArrayList(finder.getDatasetsIterator());
    Assert.assertEquals(datasets.size(), 1);
    hiveDataset = datasets.get(0);
    // With the prefix set, keys are exposed stripped of the prefix.
    Assert.assertEquals(hiveDataset.getDatasetConfig().getString("conf1"), "conf1-val1");
    Assert.assertEquals(hiveDataset.getDatasetConfig().getString("conf2"), "conf2-val2");
  }

  /**
   * Builds a mocked {@link HiveMetastoreClientPool} whose client reports exactly the given
   * db/table pairs. Each table gets a minimal storage descriptor located at /tmp/test;
   * {@link DatePartitionTable} entries additionally carry a date partition key.
   */
  private HiveMetastoreClientPool getTestPool(List<HiveDatasetFinder.DbAndTable> dbAndTables) throws Exception {
    SetMultimap<String, String> entities = HashMultimap.create();
    for (HiveDatasetFinder.DbAndTable dbAndTable : dbAndTables) {
      entities.put(dbAndTable.getDb(), dbAndTable.getTable());
    }
    HiveMetastoreClientPool pool = Mockito.mock(HiveMetastoreClientPool.class);
    IMetaStoreClient client = Mockito.mock(IMetaStoreClient.class);
    Mockito.when(client.getAllDatabases()).thenReturn(Lists.newArrayList(entities.keySet()));
    for (String db : entities.keySet()) {
      Mockito.doReturn(Lists.newArrayList(entities.get(db))).when(client).getAllTables(db);
    }
    for (HiveDatasetFinder.DbAndTable dbAndTable : dbAndTables) {
      Table table = new Table();
      table.setDbName(dbAndTable.getDb());
      table.setTableName(dbAndTable.getTable());
      if (dbAndTable instanceof DatePartitionTable) {
        table.setPartitionKeys(((DatePartitionTable) dbAndTable).getPartitionKeys());
      }
      StorageDescriptor sd = new StorageDescriptor();
      sd.setLocation("/tmp/test");
      table.setSd(sd);
      Mockito.doReturn(table).when(client).getTable(dbAndTable.getDb(), dbAndTable.getTable());
    }
    @SuppressWarnings("unchecked")
    AutoReturnableObject<IMetaStoreClient> aro = Mockito.mock(AutoReturnableObject.class);
    Mockito.when(aro.get()).thenReturn(client);
    Mockito.when(pool.getHiveRegProps()).thenReturn(null);
    Mockito.when(pool.getClient()).thenReturn(aro);
    return pool;
  }

  /**
   * {@link HiveDatasetFinder} variant that fails dataset creation for tables named
   * {@link #THROW_EXCEPTION}, used to verify the finder skips failing tables.
   */
  private class TestHiveDatasetFinder extends HiveDatasetFinder {
    // Sentinel table name that triggers an IOException in createHiveDataset.
    public static final String THROW_EXCEPTION = "throw_exception";

    public TestHiveDatasetFinder(FileSystem fs, Properties properties, HiveMetastoreClientPool pool)
        throws IOException {
      super(fs, properties, pool);
    }

    @Override
    protected HiveDataset createHiveDataset(Table table, Config config)
        throws IOException {
      if (table.getTableName().equals(THROW_EXCEPTION)) {
        throw new IOException("bad table");
      }
      return new HiveDataset(super.fs, super.clientPool, new org.apache.hadoop.hive.ql.metadata.Table(table), config);
    }
  }

  /** Marker subclass representing a date-partitioned table in the mocked metastore. */
  private static class DatePartitionTable extends HiveDatasetFinder.DbAndTable {
    static final FieldSchema DATE_PARTITION_KEY = new FieldSchema("datepartition", "String", "");

    public DatePartitionTable(String db, String table) {
      super(db, table);
    }

    public List<FieldSchema> getPartitionKeys() {
      return Lists.newArrayList(DATE_PARTITION_KEY);
    }
  }
}
| 2,366 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive/filter/DateRangePartitionFilterGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive.filter;
import java.util.Properties;
import org.apache.gobblin.data.management.copy.hive.PartitionFilterGenerator;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = { "SystemTimeTests"})
public class DateRangePartitionFilterGeneratorTest {

  /** Pin "now" to 2016-03-15T10:15 so these tests are independent of wall-clock time. */
  @BeforeMethod
  public void setUp()
      throws Exception {
    DateTimeUtils.setCurrentMillisFixed(new DateTime(2016, 3, 15, 10, 15).getMillis());
  }

  /** Hand the clock back to the system after each test. */
  @AfterMethod
  public void tearDown()
      throws Exception {
    DateTimeUtils.setCurrentMillisSystem();
  }

  /** The generator must be reflectively constructible via {@link GobblinConstructorUtils}. */
  @Test
  public void testInitialization() {
    PartitionFilterGenerator generator = GobblinConstructorUtils.invokeConstructor(PartitionFilterGenerator.class,
        DateRangePartitionFilterGenerator.class.getName(), System.getProperties());
    Assert.assertTrue(generator instanceof DateRangePartitionFilterGenerator);
  }

  @Test
  public void test() {
    assertFilter("datePartition", "2020-01-01", "2020-01-10",
        "datePartition between \"2020-01-01\" and \"2020-01-10\"");
  }

  /** Builds a generator for the given column and date range, then checks the emitted filter string. */
  private void assertFilter(String partitionColumn, String startDate, String endDate, String expectedFilter) {
    Properties props = new Properties();
    props.put(DateRangePartitionFilterGenerator.PARTITION_COLUMN, partitionColumn);
    props.put(DateRangePartitionFilterGenerator.START_DATE, startDate);
    props.put(DateRangePartitionFilterGenerator.END_DATE, endDate);
    PartitionFilterGenerator generator = GobblinConstructorUtils.invokeConstructor(PartitionFilterGenerator.class,
        DateRangePartitionFilterGenerator.class.getName(), props);
    Assert.assertEquals(generator.getFilter(null), expectedFilter);
  }
}
| 2,367 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/hive/filter/LookbackPartitionFilterGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.hive.filter;
import java.util.Properties;
import org.apache.gobblin.data.management.copy.hive.PartitionFilterGenerator;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = { "SystemTimeTests"})
public class LookbackPartitionFilterGeneratorTest {

  /** Freeze "now" at 2016-03-15T10:15 so the lookback arithmetic is deterministic. */
  @BeforeMethod
  public void setUp()
      throws Exception {
    DateTimeUtils.setCurrentMillisFixed(new DateTime(2016, 3, 15, 10, 15).getMillis());
  }

  /** Restore the system clock after each test. */
  @AfterMethod
  public void tearDown()
      throws Exception {
    DateTimeUtils.setCurrentMillisSystem();
  }

  /** The generator must be reflectively constructible via {@link GobblinConstructorUtils}. */
  @Test
  public void testInitialization() {
    PartitionFilterGenerator generator = GobblinConstructorUtils.invokeConstructor(PartitionFilterGenerator.class,
        LookbackPartitionFilterGenerator.class.getName(), System.getProperties());
    Assert.assertTrue(generator instanceof LookbackPartitionFilterGenerator);
  }

  @Test
  public void test() {
    // ISO-8601 lookback periods subtracted from the frozen "now" above.
    assertFilter("datePartition", "P1D", "YYYY-MM-dd-HH", "datePartition >= \"2016-03-14-10\"");
    assertFilter("datePartition", "P2D", "YYYY-MM-dd-HH", "datePartition >= \"2016-03-13-10\"");
    assertFilter("datePartition", "PT4H", "YYYY-MM-dd-HH", "datePartition >= \"2016-03-15-06\"");
    assertFilter("myColumn", "PT4H", "YYYY-MM-dd-HH", "myColumn >= \"2016-03-15-06\"");
  }

  /** Builds a generator for the given column/lookback/format, then checks the emitted filter string. */
  private void assertFilter(String partitionColumn, String lookback, String datetimeFormat, String expectedFilter) {
    Properties props = new Properties();
    props.put(LookbackPartitionFilterGenerator.PARTITION_COLUMN, partitionColumn);
    props.put(LookbackPartitionFilterGenerator.LOOKBACK, lookback);
    props.put(LookbackPartitionFilterGenerator.DATETIME_FORMAT, datetimeFormat);
    PartitionFilterGenerator generator = GobblinConstructorUtils.invokeConstructor(PartitionFilterGenerator.class,
        LookbackPartitionFilterGenerator.class.getName(), props);
    Assert.assertEquals(generator.getFilter(null), expectedFilter);
  }
}
| 2,368 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedDatasetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.net.URI;
import java.util.Collection;
import java.util.Properties;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Sets;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopyConfiguration;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.copy.PreserveAttributes;
import org.apache.gobblin.data.management.copy.entities.PostPublishStep;
import org.apache.gobblin.data.management.copy.entities.PrePublishStep;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.PathUtils;
import org.apache.gobblin.util.commit.DeleteFileCommitStep;
import org.apache.gobblin.util.filesystem.DataFileVersionStrategy;
/**
* Unit test for {@link ConfigBasedDataset}
* @author mitu
*
*/
@Test(groups = {"gobblin.data.management.copy.replication"})
@Slf4j
public class ConfigBasedDatasetTest {

  /**
   * Wires up a mocked pull {@link CopyRoute} between {@code sourceDir} and {@code destinationDir}
   * and returns the copy entities a {@link ConfigBasedDataset} generates for it.
   *
   * @param sourceDir local-FS directory acting as the replication source
   * @param destinationDir local-FS directory acting as the replication destination
   * @param sourceWatermark watermark value reported by the mocked source end point
   * @param isFilterEnabled when true, applies a {@code HiddenFilter} to both files and directories
   * @return the copy entities produced by {@link ConfigBasedDataset#getCopyableFiles}
   */
  public Collection<? extends CopyEntity> testGetCopyableFilesHelper(String sourceDir, String destinationDir,
      long sourceWatermark, boolean isFilterEnabled) throws Exception {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    URI local = localFs.getUri();

    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");
    boolean applyFilterToDirectories = false;
    if (isFilterEnabled) {
      properties.setProperty(DatasetUtils.CONFIGURATION_KEY_PREFIX + "path.filter.class",
          "org.apache.gobblin.util.filters.HiddenFilter");
      properties.setProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES, "true");
      applyFilterToDirectories =
          Boolean.parseBoolean(properties.getProperty(CopyConfiguration.APPLY_FILTER_TO_DIRECTORIES, "false"));
    }
    // Instantiate the path filter once, after all filter properties are in place.
    // (A throwaway instance used to be built before the branch above and then rebuilt inside it.)
    PathFilter pathFilter = DatasetUtils.instantiatePathFilter(properties);

    CopyConfiguration copyConfiguration =
        CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties)
            .publishDir(new Path(destinationDir))
            .preserve(PreserveAttributes.fromMnemonicString("ugp"))
            .build();

    ReplicationMetaData mockMetaData = Mockito.mock(ReplicationMetaData.class);
    Mockito.when(mockMetaData.toString()).thenReturn("Mock Meta Data");

    // Replication config mocked in PULL mode with the default file-version strategy.
    ReplicationConfiguration mockRC = Mockito.mock(ReplicationConfiguration.class);
    Mockito.when(mockRC.getCopyMode()).thenReturn(ReplicationCopyMode.PULL);
    Mockito.when(mockRC.getMetaData()).thenReturn(mockMetaData);
    Mockito.when(mockRC.getVersionStrategyFromConfigStore())
        .thenReturn(Optional.of(DataFileVersionStrategy.DEFAULT_DATA_FILE_VERSION_STRATEGY));
    Mockito.when(mockRC.getEnforceFileSizeMatchFromConfigStore()).thenReturn(Optional.absent());

    // Source end point: exposes the source files plus a fixed watermark.
    HadoopFsEndPoint copyFrom = Mockito.mock(HadoopFsEndPoint.class);
    Mockito.when(copyFrom.getDatasetPath()).thenReturn(new Path(sourceDir));
    Mockito.when(copyFrom.getFsURI()).thenReturn(local);
    ComparableWatermark sw = new LongWatermark(sourceWatermark);
    Mockito.when(copyFrom.getWatermark()).thenReturn(Optional.of(sw));
    Mockito.when(copyFrom.getFiles())
        .thenReturn(
            FileListUtils.listFilesRecursively(localFs, new Path(sourceDir), pathFilter, applyFilterToDirectories));

    // Destination end point: same FS, no watermark recorded yet.
    HadoopFsEndPoint copyTo = Mockito.mock(HadoopFsEndPoint.class);
    Mockito.when(copyTo.getDatasetPath()).thenReturn(new Path(destinationDir));
    Mockito.when(copyTo.getFsURI()).thenReturn(local);
    Optional<ComparableWatermark> absentWatermark = Optional.absent();
    Mockito.when(copyTo.getWatermark()).thenReturn(absentWatermark);
    Mockito.when(copyTo.getFiles())
        .thenReturn(FileListUtils.listFilesRecursively(localFs, new Path(destinationDir), pathFilter,
            applyFilterToDirectories));

    CopyRoute route = Mockito.mock(CopyRoute.class);
    Mockito.when(route.getCopyFrom()).thenReturn(copyFrom);
    Mockito.when(route.getCopyTo()).thenReturn(copyTo);

    ConfigBasedDataset dataset = new ConfigBasedDataset(mockRC, properties, route);
    return dataset.getCopyableFiles(localFs, copyConfiguration);
  }

  /**
   * Verifies the generated copy entities: file copies with matching relative paths,
   * a pre-publish delete of the stale destination file, and a post-publish
   * watermark-metadata step carrying the source watermark.
   */
  @Test
  public void testGetCopyableFiles() throws Exception {
    String sourceDir = getClass().getClassLoader().getResource("configBasedDatasetTest/src").getFile();
    String destinationDir = getClass().getClassLoader().getResource("configBasedDatasetTest/dest").getFile();
    long sourceWatermark = 100L;

    Collection<? extends CopyEntity> copyableFiles =
        testGetCopyableFilesHelper(sourceDir, destinationDir, sourceWatermark, false);
    Assert.assertEquals(copyableFiles.size(), 8);

    // Enabling the HiddenFilter drops two entities.
    copyableFiles = testGetCopyableFilesHelper(sourceDir, destinationDir, sourceWatermark, true);
    Assert.assertEquals(copyableFiles.size(), 6);

    Set<Path> paths =
        Sets.newHashSet(new Path("dir1/file2"), new Path("dir1/file1"), new Path("dir2/file1"), new Path("dir2/file3"));
    for (CopyEntity copyEntity : copyableFiles) {
      if (copyEntity instanceof CopyableFile) {
        CopyableFile file = (CopyableFile) copyEntity;
        Path originRelativePath =
            PathUtils.relativizePath(PathUtils.getPathWithoutSchemeAndAuthority(file.getOrigin().getPath()),
                PathUtils.getPathWithoutSchemeAndAuthority(new Path(sourceDir)));
        Path targetRelativePath =
            PathUtils.relativizePath(PathUtils.getPathWithoutSchemeAndAuthority(file.getDestination()),
                PathUtils.getPathWithoutSchemeAndAuthority(new Path(destinationDir)));
        Assert.assertTrue(paths.contains(originRelativePath));
        Assert.assertTrue(paths.contains(targetRelativePath));
        Assert.assertEquals(originRelativePath, targetRelativePath);
      } else if (copyEntity instanceof PrePublishStep) {
        PrePublishStep pre = (PrePublishStep) copyEntity;
        Assert.assertTrue(pre.getStep() instanceof DeleteFileCommitStep);
        // The stale destination file must be scheduled for deletion.
        // contains() rather than indexOf(..) > 0, which would wrongly fail on a match at index 0.
        Assert.assertTrue(pre.explain().contains("configBasedDatasetTest/dest/dir1/file1"));
      } else if (copyEntity instanceof PostPublishStep) {
        PostPublishStep post = (PostPublishStep) copyEntity;
        Assert.assertTrue(post.getStep() instanceof WatermarkMetadataGenerationCommitStep);
        Assert.assertTrue(
            post.explain().contains("dest/_metadata") && post.explain().contains("" + sourceWatermark));
      } else {
        throw new Exception("Wrong type");
      }
    }
  }
}
| 2,369 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/replication/ReplicationConfigurationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
@Test(groups = {"gobblin.data.management.copy.replication"})
public class ReplicationConfigurationTest {
@Test
public void testValidConfigsInPullMode() throws Exception{
  this.checkReplicationConfig_Pull(loadConfig("replicationConfigTest/validCompleteDataset.conf", false));
}

@Test
public void testValidConfigsInPullMode_withTopologyPicker() throws Exception{
  this.checkReplicationConfig_Pull(loadConfig("replicationConfigTest/validCompleteDataset_PullMode2.conf", false));
}

@Test
public void testValidConfigsInPushMode_withClusterResolve() throws Exception{
  this.checkReplicationConfig_Push(loadConfig("replicationConfigTest/validCompleteDataset_PushMode.conf", true));
}

@Test
public void testValidConfigsInPushMode_withTopologyPicker() throws Exception{
  this.checkReplicationConfig_Push(loadConfig("replicationConfigTest/validCompleteDataset_PushMode2.conf", true));
}

/**
 * Parses the named classpath resource into a {@link ReplicationConfiguration},
 * optionally resolving config substitutions first (needed by the push-mode files).
 */
private ReplicationConfiguration loadConfig(String resource, boolean resolve) throws Exception {
  Config config = ConfigFactory.parseResources(getClass().getClassLoader(), resource);
  if (resolve) {
    config = config.resolve();
  }
  return ReplicationConfiguration.buildFromConfig(config);
}
private void checkReplicationConfigStaticPart(ReplicationConfiguration rc) throws Exception{
ReplicationMetaData md = rc.getMetaData();
Assert.assertTrue(md.getValues().get().get(ReplicationConfiguration.METADATA_JIRA).equals("jira-4455"));
Assert.assertTrue(md.getValues().get().get(ReplicationConfiguration.METADATA_NAME).equals("profileTest"));
Assert.assertTrue(md.getValues().get().get(ReplicationConfiguration.METADATA_OWNER).equals("mitu"));
EndPoint sourceTmp = rc.getSource();
Assert.assertTrue(sourceTmp instanceof SourceHadoopFsEndPoint);
SourceHadoopFsEndPoint source = (SourceHadoopFsEndPoint)sourceTmp;
Assert.assertTrue(source.getEndPointName().equals(ReplicationConfiguration.REPLICATION_SOURCE));
Assert.assertTrue(source.isSource());
HadoopFsReplicaConfig innerConf = source.getRc();
Assert.assertTrue(innerConf.getClustername().equals("cluster1"));
Assert.assertTrue(innerConf.getFsURI().toString().equals("hdfs://coloX-cluster1nn01.grid.com:9000"));
Assert.assertTrue(innerConf.getPath().toString().equals("/jobs/mitu/profileTest"));
Assert.assertTrue(innerConf.getColo().equals("coloX"));
List<EndPoint> replicas = rc.getReplicas();
Assert.assertTrue(replicas.size()==4);
EndPoint replica_holdem = replicas.get(0);
innerConf = ((ReplicaHadoopFsEndPoint)replica_holdem).getRc();
Assert.assertTrue(innerConf.getClustername().equals("cluster4"));
Assert.assertTrue(innerConf.getFsURI().toString().equals("hdfs://coloY-cluster4nn01.grid.com:9000"));
Assert.assertTrue(innerConf.getPath().toString().equals("/data/derived/oncluster4"));
Assert.assertTrue(innerConf.getColo().equals("coloY"));
EndPoint replica_uno = replicas.get(1);
innerConf = ((ReplicaHadoopFsEndPoint)replica_uno).getRc();
Assert.assertTrue(innerConf.getClustername().equals("cluster3"));
Assert.assertTrue(innerConf.getFsURI().toString().equals("hdfs://coloY-cluster3nn01.grid.com:9000"));
Assert.assertTrue(innerConf.getPath().toString().equals("/data/derived/oncluster3"));
Assert.assertTrue(innerConf.getColo().equals("coloY"));
EndPoint replica_war = replicas.get(2);
innerConf = ((ReplicaHadoopFsEndPoint)replica_war).getRc();
Assert.assertTrue(innerConf.getClustername().equals("cluster1"));
Assert.assertTrue(innerConf.getFsURI().toString().equals("hdfs://coloX-cluster1nn01.grid.com:9000"));
Assert.assertTrue(innerConf.getPath().toString().equals("/data/derived/oncluster1"));
Assert.assertTrue(innerConf.getColo().equals("coloX"));
EndPoint replica_tarock = replicas.get(3);
innerConf = ((ReplicaHadoopFsEndPoint)replica_tarock).getRc();
Assert.assertTrue(innerConf.getClustername().equals("cluster2"));
Assert.assertTrue(innerConf.getFsURI().toString().equals("hdfs://coloX-cluster2nn01.grid.com:9000"));
Assert.assertTrue(innerConf.getPath().toString().equals("/data/derived/oncluster2"));
Assert.assertTrue(innerConf.getColo().equals("coloX"));
}
private void checkReplicationConfig_Push(ReplicationConfiguration rc) throws Exception{
ReplicationCopyMode copyMode = rc.getCopyMode();
Assert.assertTrue(copyMode == ReplicationCopyMode.PUSH);
checkReplicationConfigStaticPart(rc);
List<EndPoint> replicas = rc.getReplicas();
EndPoint source = rc.getSource();
EndPoint replica4 = replicas.get(0);
EndPoint replica3 = replicas.get(1);
EndPoint replica1 = replicas.get(2);
EndPoint replica2 = replicas.get(3);
DataFlowTopology topology = rc.getDataFlowToplogy();
Assert.assertTrue(topology.getDataFlowPaths().size()==4);
for(DataFlowTopology.DataFlowPath p: topology.getDataFlowPaths()){
List<CopyRoute> pairs = p.getCopyRoutes();
Assert.assertTrue(!pairs.isEmpty());
String copyFromName = pairs.get(0).getCopyFrom().getEndPointName();
if(copyFromName.equals("replica1")){
Assert.assertTrue(pairs.size()==1);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica1 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica2 );
}
else if(copyFromName.equals("replica2")){
Assert.assertTrue(pairs.size()==1);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica2 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica4 );
}
else if(copyFromName.equals("replica4")){
Assert.assertTrue(pairs.size()==1);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica4 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica3 );
}
else if(copyFromName.equals(ReplicationConfiguration.REPLICATION_SOURCE)){
Assert.assertTrue(pairs.size()==1);
Assert.assertTrue(pairs.get(0).getCopyFrom() == source );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica1 );
}
else{
throw new Exception("CopyFrom name is invalid " +copyFromName);
}
}
}
private void checkReplicationConfig_Pull(ReplicationConfiguration rc) throws Exception{
ReplicationCopyMode copyMode = rc.getCopyMode();
Assert.assertTrue(copyMode == ReplicationCopyMode.PULL);
checkReplicationConfigStaticPart(rc);
List<EndPoint> replicas = rc.getReplicas();
EndPoint source = rc.getSource();
EndPoint replica4 = replicas.get(0);
EndPoint replica3 = replicas.get(1);
EndPoint replica1 = replicas.get(2);
EndPoint replica2 = replicas.get(3);
DataFlowTopology topology = rc.getDataFlowToplogy();
Assert.assertTrue(topology.getDataFlowPaths().size()==4);
for(DataFlowTopology.DataFlowPath p: topology.getDataFlowPaths()){
List<CopyRoute> pairs = p.getCopyRoutes();
Assert.assertTrue(!pairs.isEmpty());
String copyToName = pairs.get(0).getCopyTo().getEndPointName();
if(copyToName.equals("replica1")){
Assert.assertTrue(pairs.size()==1);
Assert.assertTrue(pairs.get(0).getCopyFrom() == source );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica1 );
}
else if(copyToName.equals("replica2")){
Assert.assertTrue(pairs.size()==2);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica1 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica2 );
Assert.assertTrue(pairs.get(1).getCopyFrom() == source );
Assert.assertTrue(pairs.get(1).getCopyTo() == replica2 );
}
else if(copyToName.equals("replica4")){
Assert.assertTrue(pairs.size()==3);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica2 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica4 );
Assert.assertTrue(pairs.get(1).getCopyFrom() == replica1 );
Assert.assertTrue(pairs.get(1).getCopyTo() == replica4 );
Assert.assertTrue(pairs.get(2).getCopyFrom() == source );
Assert.assertTrue(pairs.get(2).getCopyTo() == replica4 );
}
else if(copyToName.equals("replica3")){
Assert.assertTrue(pairs.size()==4);
Assert.assertTrue(pairs.get(0).getCopyFrom() == replica4 );
Assert.assertTrue(pairs.get(0).getCopyTo() == replica3 );
Assert.assertTrue(pairs.get(1).getCopyFrom() == replica2 );
Assert.assertTrue(pairs.get(1).getCopyTo() == replica3 );
Assert.assertTrue(pairs.get(2).getCopyFrom() == replica1 );
Assert.assertTrue(pairs.get(2).getCopyTo() == replica3 );
Assert.assertTrue(pairs.get(3).getCopyFrom() == source );
Assert.assertTrue(pairs.get(3).getCopyTo() == replica3 );
}
else{
throw new Exception("CopyTo name is invalid " +copyToName);
}
}
}
}
| 2,370 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/replication/ConfigBasedDatasetsFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import com.google.common.base.Optional;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Unit test for {@link ConfigBasedDatasetsFinder}
* @author mitu
*
*/
@Test(groups = {"gobblin.data.management.copy.replication"})
public class ConfigBasedDatasetsFinderTest {
@Test
public void testGetLeafDatasetURIs() throws URISyntaxException, IOException {
Collection<URI> allDatasetURIs = new ArrayList<URI>();
// leaf URI
allDatasetURIs.add(new URI("/data/derived/browsemaps/entities/anet"));
allDatasetURIs.add(new URI("/data/derived/browsemaps/entities/comp"));
allDatasetURIs.add(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/hourly_data/aggregation/daily"));
allDatasetURIs.add(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/hourly_data/aggregation/daily_dedup"));
// None leaf URI
allDatasetURIs.add(new URI("/data/derived"));
allDatasetURIs.add(new URI("/data/derived/browsemaps"));
allDatasetURIs.add(new URI("/data/derived/browsemaps/entities/"));
allDatasetURIs.add(new URI("/data/derived/gowl/"));
allDatasetURIs.add(new URI("/data/derived/gowl/pymk/"));
allDatasetURIs.add(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/"));
allDatasetURIs.add(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/hourly_data/aggregation"));
// wrong root
allDatasetURIs.add(new URI("/data/derived2"));
// disabled
Set<URI> disabled = new HashSet<URI>();
disabled.add(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/hourly_data/aggregation/daily"));
Set<URI> validURIs = ConfigBasedDatasetsFinder.getValidDatasetURIsHelper(allDatasetURIs, disabled, new Path("/data/derived"));
Assert.assertTrue(validURIs.size() == 3);
Assert.assertTrue(validURIs.contains(new URI("/data/derived/gowl/pymk/invitationsCreationsSends/hourly_data/aggregation/daily_dedup")));
Assert.assertTrue(validURIs.contains(new URI("/data/derived/browsemaps/entities/comp")));
Assert.assertTrue(validURIs.contains(new URI("/data/derived/browsemaps/entities/anet")));
}
@Test
public void blacklistPatternTest() {
Properties properties = new Properties();
properties.setProperty("gobblin.selected.policy", "random");
properties.setProperty("source","random");
properties.setProperty("replicas", "random");
ConfigBasedMultiDatasets configBasedMultiDatasets = new ConfigBasedMultiDatasets();
ReplicationConfiguration rc = Mockito.mock(ReplicationConfiguration.class);
CopyRoute cr = Mockito.mock(CopyRoute.class);
ConfigBasedDataset configBasedDataset = new ConfigBasedDataset(rc, new Properties(), cr, "/test/tmp/word");
ConfigBasedDataset configBasedDataset2 = new ConfigBasedDataset(rc, new Properties(), cr, "/test/a_temporary/word");
ConfigBasedDataset configBasedDataset3 = new ConfigBasedDataset(rc, new Properties(), cr, "/test/go/word");
Pattern pattern1 = Pattern.compile(".*_temporary.*");
Pattern pattern2 = Pattern.compile(".*tmp.*");
List<Pattern> patternList = new ArrayList<>();
patternList.add(pattern1);
patternList.add(pattern2);
Assert.assertFalse(configBasedMultiDatasets.blacklistFilteringHelper(configBasedDataset, Optional.of(patternList)));
Assert.assertFalse(configBasedMultiDatasets.blacklistFilteringHelper(configBasedDataset2, Optional.of(patternList)));
Assert.assertTrue(configBasedMultiDatasets.blacklistFilteringHelper(configBasedDataset3, Optional.of(patternList)));
}
}
| 2,371 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/copy/replication/CopyRouteGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.copy.replication;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
@Test(groups = { "gobblin.data.management.copy.replication" })
public class CopyRouteGeneratorTest {
@Test
public void testCopyRouteGenerator() throws Exception {
long replica1Watermark = 1475304606000L; // Oct 1, 2016
long sourceWatermark = 1475604606000L; // Oct 4, 2016
ReplicaHadoopFsEndPoint notAvailableReplica = Mockito.mock(ReplicaHadoopFsEndPoint.class);
Mockito.when(notAvailableReplica.isFileSystemAvailable()).thenReturn(false);
Optional<ComparableWatermark> tmp = Optional.absent();
Mockito.when(notAvailableReplica.getWatermark()).thenReturn(tmp);
ReplicaHadoopFsEndPoint replica1 = Mockito.mock(ReplicaHadoopFsEndPoint.class);
Mockito.when(replica1.isFileSystemAvailable()).thenReturn(true);
ComparableWatermark cw = new LongWatermark(replica1Watermark) ;
tmp = Optional.of(cw);
Mockito.when(replica1.getWatermark()).thenReturn(tmp);
SourceHadoopFsEndPoint source = Mockito.mock(SourceHadoopFsEndPoint.class);
Mockito.when(source.isFileSystemAvailable()).thenReturn(true);
cw = new LongWatermark(sourceWatermark);
tmp = Optional.of(cw);
Mockito.when(source.getWatermark()).thenReturn(tmp);
ReplicaHadoopFsEndPoint copyToEndPoint = Mockito.mock(ReplicaHadoopFsEndPoint.class);
Mockito.when(copyToEndPoint.isFileSystemAvailable()).thenReturn(true);
CopyRoute cp1 = new CopyRoute(notAvailableReplica, copyToEndPoint);
CopyRoute cp2 = new CopyRoute(replica1, copyToEndPoint);
CopyRoute cp3 = new CopyRoute(source, copyToEndPoint);
DataFlowTopology.DataFlowPath dataFlowPath =
new DataFlowTopology.DataFlowPath(ImmutableList.<CopyRoute> of(cp1, cp2, cp3));
DataFlowTopology dataFlowTopology = new DataFlowTopology();
dataFlowTopology.addDataFlowPath(dataFlowPath);
ReplicationConfiguration rc = Mockito.mock(ReplicationConfiguration.class);
Mockito.when(rc.getCopyMode()).thenReturn(ReplicationCopyMode.PULL);
Mockito.when(rc.getSource()).thenReturn(source);
Mockito.when(rc.getReplicas()).thenReturn(ImmutableList.<EndPoint> of(notAvailableReplica, replica1, copyToEndPoint));
Mockito.when(rc.getDataFlowToplogy()).thenReturn(dataFlowTopology);
CopyRouteGeneratorOptimizedNetworkBandwidthForTest network = new CopyRouteGeneratorOptimizedNetworkBandwidthForTest();
Assert.assertTrue(network.getPullRoute(rc, copyToEndPoint).get().getCopyFrom().equals(replica1));
Assert.assertTrue(network.getPullRoute(rc, copyToEndPoint).get().getCopyFrom().getWatermark()
.get().compareTo(new LongWatermark(replica1Watermark)) == 0);
CopyRouteGeneratorOptimizedLatency latency = new CopyRouteGeneratorOptimizedLatency();
Assert.assertTrue(latency.getPullRoute(rc, copyToEndPoint).get().getCopyFrom().equals(source));
Assert.assertTrue(latency.getPullRoute(rc, copyToEndPoint).get().getCopyFrom().getWatermark()
.get().compareTo(new LongWatermark(sourceWatermark)) == 0);
}
}
| 2,372 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/partition/FileSetTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.partition;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.data.management.copy.CopyableFile;
import org.apache.gobblin.data.management.dataset.DummyDataset;
public class FileSetTest {
private class TestFile extends CopyableFile {
public TestFile(FileStatus fileStatus) {
super();
this.fileStatus = fileStatus;
}
private final FileStatus fileStatus;
@Override
public FileStatus getFileStatus() {
return this.fileStatus;
}
@Override
public FileStatus getOrigin() {
return this.fileStatus;
}
}
@Test
public void testPartitionBuilder() throws Exception {
String file1 = "file1";
String file2 = "file2";
FileSet<TestFile> fileSet = new FileSet.Builder<TestFile>("test", new DummyDataset(new Path("/path")))
.add(new TestFile(createFileStatus(file1))).add(Lists.newArrayList(new TestFile(createFileStatus(file2))))
.build();
Assert.assertEquals(fileSet.getFiles().size(), 2);
Assert.assertEquals(fileSet.getName(), "test");
Assert.assertEquals(fileSet.getFiles().get(0).getFileStatus().getPath().toString(), file1);
Assert.assertEquals(fileSet.getFiles().get(1).getFileStatus().getPath().toString(), file2);
Assert.assertEquals(fileSet.getTotalEntities(), 2);
Assert.assertEquals(fileSet.getTotalSizeInBytes(), 20);
}
private static FileStatus createFileStatus(String path) {
return new FileStatus(10, false, 0, 0, 0, new Path(path));
}
}
| 2,373 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/policy/HiddenFilterSelectionPolicyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
public class HiddenFilterSelectionPolicyTest {
@Test
public void testListSelectedVersions() throws Exception {
List<FileSystemDatasetVersion> versionList = new ArrayList<>();
Set<String> pathSet = new HashSet<>();
Path path1 = new Path("/data/dataset/versions/version1");
pathSet.add(path1.toString());
Path path2 = new Path("/data/dataset/versions/version2");
pathSet.add(path2.toString());
Path path3 = new Path("/data/dataset/.temp/tmpPath");
Path path4 = new Path("/data/dataset/_temp/tmpPath");
versionList.add(new TimestampedDatasetVersion(new DateTime(), path1));
versionList.add(new TimestampedDatasetVersion(new DateTime(), path2));
versionList.add(new TimestampedDatasetVersion(new DateTime(), path3));
versionList.add(new TimestampedDatasetVersion(new DateTime(), path4));
List<String> hiddenFilePrefixes = Arrays.asList("_", ".");
List<Config> configList = new ArrayList<>();
Config config1 = ConfigFactory.parseMap(
ImmutableMap.of(HiddenFilterSelectionPolicy.HIDDEN_FILTER_HIDDEN_FILE_PREFIX_KEY, hiddenFilePrefixes));
configList.add(config1);
Config config2 = ConfigFactory.parseMap(
ImmutableMap.of(HiddenFilterSelectionPolicy.HIDDEN_FILTER_HIDDEN_FILE_PREFIX_KEY, "_,."));
configList.add(config2);
for (Config config : configList) {
HiddenFilterSelectionPolicy policy = new HiddenFilterSelectionPolicy(config);
Collection<FileSystemDatasetVersion> selectedVersions = policy.listSelectedVersions(versionList);
Assert.assertEquals(selectedVersions.size(), 2);
for (FileSystemDatasetVersion version : selectedVersions) {
Set<Path> paths = version.getPaths();
for (Path path : paths) {
Assert.assertTrue(pathSet.contains(path.toString()));
}
}
}
}
} | 2,374 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/policy/NewestKSelectionPolicyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.data.management.version.DatasetVersion;
/** Unit tests for {@link NewestKSelectionPolicy} */
public class NewestKSelectionPolicyTest {
private static final Map<String, Map<String, Integer>> TEST_CONFIGS =
ImmutableMap.<String, Map<String, Integer>>builder()
.put("empty", ImmutableMap.<String, Integer>builder().build())
.put("selectedPos", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY, 5)
.build())
.put("notSelectedPos", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY, 10)
.build())
.build();
private static final Map<String, Map<String, Integer>> NEG_TEST_CONFIGS =
ImmutableMap.<String, Map<String, Integer>>builder()
.put("bothProps", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY, 5)
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY, 5)
.build())
.put("selectedNeg", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY, -5)
.build())
.put("notSelectedNeg", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY, -1)
.build())
.put("selectedBig", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY,
NewestKSelectionPolicy.MAX_VERSIONS_ALLOWED + 1)
.build())
.put("notSelectedBig", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY,
NewestKSelectionPolicy.MAX_VERSIONS_ALLOWED + 1)
.build())
.put("selected0", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY, 0)
.build())
.put("notSelected0", ImmutableMap.<String, Integer>builder()
.put(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY, 0)
.build())
.build();
private static final Map<String, Integer> TEST_RESULTS =
ImmutableMap.<String, Integer>builder()
.put("empty", NewestKSelectionPolicy.VERSIONS_SELECTED_DEFAULT)
.put("selectedPos", 5)
.put("notSelectedPos", -10)
.build();
public static class TestStringDatasetVersion implements DatasetVersion,
Comparable<DatasetVersion> {
private String _version;
public TestStringDatasetVersion(String version) {
_version = version;
}
@Override
public int compareTo(DatasetVersion o) {
if (!(o instanceof TestStringDatasetVersion)) {
throw new RuntimeException("Incompatible version: " + o);
}
return _version.compareTo(((TestStringDatasetVersion)o)._version);
}
@Override
public Object getVersion() {
return _version;
}
}
@Test
public void testCreationProps() {
for(Map.Entry<String, Map<String, Integer>> test: TEST_CONFIGS.entrySet()) {
String testName = test.getKey();
Properties testProps = new Properties();
for (Map.Entry<String, Integer> prop: test.getValue().entrySet()) {
testProps.setProperty(prop.getKey(), prop.getValue().toString());
}
NewestKSelectionPolicy policy = new NewestKSelectionPolicy(testProps);
Assert.assertEquals(policy.getVersionsSelected(),
Math.abs(TEST_RESULTS.get(testName).intValue()),
"Failure for test " + testName);
Assert.assertEquals(policy.isExcludeMode(), TEST_RESULTS.get(testName).intValue() < 0,
"Failure for test " + testName);
}
for(Map.Entry<String, Map<String, Integer>> test: NEG_TEST_CONFIGS.entrySet()) {
String testName = test.getKey();
Properties testProps = new Properties();
for (Map.Entry<String, Integer> prop: test.getValue().entrySet()) {
testProps.setProperty(prop.getKey(), prop.getValue().toString());
}
try {
new NewestKSelectionPolicy(testProps);
Assert.fail("Exception expected for test " + testName);
}
catch (RuntimeException e) {
//OK
}
}
}
@Test
public void testCreationConfig() {
for(Map.Entry<String, Map<String, Integer>> test: TEST_CONFIGS.entrySet()) {
String testName = test.getKey();
Config conf = ConfigFactory.parseMap(test.getValue());
NewestKSelectionPolicy policy = new NewestKSelectionPolicy(conf);
Assert.assertEquals(policy.getVersionsSelected(),
Math.abs(TEST_RESULTS.get(testName).intValue()),
"Failure for test " + testName);
Assert.assertEquals(policy.isExcludeMode(), TEST_RESULTS.get(testName).intValue() < 0,
"Failure for test " + testName);
}
for(Map.Entry<String, Map<String, Integer>> test: NEG_TEST_CONFIGS.entrySet()) {
String testName = test.getKey();
Config conf = ConfigFactory.parseMap(test.getValue());
try {
new NewestKSelectionPolicy(conf);
Assert.fail("Exception expected for test " + testName);
}
catch (RuntimeException e) {
// OK
}
}
}
@Test
public void testSelect() {
ArrayList<DatasetVersion> versions = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
versions.add(new TestStringDatasetVersion(String.format("v%03d", i)));
}
//selectedVersions 5 < 10
Config conf = ConfigFactory.empty()
.withValue(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY,
ConfigValueFactory.fromAnyRef(5));
NewestKSelectionPolicy policy = new NewestKSelectionPolicy(conf);
Collection<DatasetVersion> res = policy.listSelectedVersions(versions);
int idx = 0;
Assert.assertEquals(res.size(), policy.getVersionsSelected());
for (DatasetVersion v: res) {
Assert.assertEquals(v, versions.get(idx++), "Mismatch for index " + idx);
}
//selectedVersions 15 > 10
conf = ConfigFactory.empty()
.withValue(NewestKSelectionPolicy.NEWEST_K_VERSIONS_SELECTED_KEY,
ConfigValueFactory.fromAnyRef(15));
policy = new NewestKSelectionPolicy(conf);
res = policy.listSelectedVersions(versions);
idx = 0;
Assert.assertEquals(res.size(), versions.size());
for (DatasetVersion v: res) {
Assert.assertEquals(v, versions.get(idx++), "Mismatch for index " + idx);
}
//notSelectedVersions 4 < 10
conf = ConfigFactory.empty()
.withValue(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY,
ConfigValueFactory.fromAnyRef(4));
policy = new NewestKSelectionPolicy(conf);
res = policy.listSelectedVersions(versions);
idx = policy.getVersionsSelected();
Assert.assertEquals(res.size(), versions.size() - policy.getVersionsSelected());
for (DatasetVersion v: res) {
Assert.assertEquals(v, versions.get(idx++), "Mismatch for index " + idx);
}
//notSelectedVersions 14 > 10
conf = ConfigFactory.empty()
.withValue(NewestKSelectionPolicy.NEWEST_K_VERSIONS_NOTSELECTED_KEY,
ConfigValueFactory.fromAnyRef(14));
policy = new NewestKSelectionPolicy(conf);
res = policy.listSelectedVersions(versions);
Assert.assertEquals(res.size(), 0);
}
}
| 2,375 |
0 | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/test/java/org/apache/gobblin/data/management/policy/TimeBasedSelectionPolicyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.policy;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
/**
* Test for {@link SelectAfterTimeBasedPolicy}.
*/
@Test(groups = { "gobblin.data.management.policy" })
public class TimeBasedSelectionPolicyTest {

  /**
   * {@link SelectAfterTimeBasedPolicy} configured through {@link Properties}: selects only versions
   * newer than the configured look-back window.
   */
  @Test
  public void testListCopyableVersions() {
    Properties props = new Properties();
    Path dummyPath = new Path("dummy");
    DateTime dt1 = new DateTime().minusDays(8);
    DateTime dt2 = new DateTime().minusDays(6);

    // 7-day look back: only the 6-day-old version is selected.
    props.put(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "7d");
    SelectAfterTimeBasedPolicy policyLookback7Days = new SelectAfterTimeBasedPolicy(props);
    TimestampedDatasetVersion version1 = new TimestampedDatasetVersion(dt1, dummyPath);
    TimestampedDatasetVersion version2 = new TimestampedDatasetVersion(dt2, dummyPath);
    Assert.assertEquals(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 1);

    // 1-hour look back: both versions are older than the window.
    props.put(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "1h");
    SelectAfterTimeBasedPolicy policyLookback1Hour = new SelectAfterTimeBasedPolicy(props);
    Assert.assertEquals(policyLookback1Hour.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 0);

    // 9-day look back: both versions fall inside the window.
    // (Local renamed from the misleading "policyLookback8Days".)
    props.put(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "9d");
    SelectAfterTimeBasedPolicy policyLookback9Days = new SelectAfterTimeBasedPolicy(props);
    Assert.assertEquals(policyLookback9Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 2);
  }

  /** Same scenarios as {@link #testListCopyableVersions()}, configured through a typesafe {@link Config}. */
  @Test
  public void testSelectAfterTimebasedPolicy() {
    Path dummyPath = new Path("dummy");
    DateTime dt1 = new DateTime().minusDays(8);
    DateTime dt2 = new DateTime().minusDays(6);

    Config config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "7d"));
    SelectAfterTimeBasedPolicy policyLookback7Days = new SelectAfterTimeBasedPolicy(config);
    TimestampedDatasetVersion version1 = new TimestampedDatasetVersion(dt1, dummyPath);
    TimestampedDatasetVersion version2 = new TimestampedDatasetVersion(dt2, dummyPath);
    Assert.assertEquals(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 1);
    // The newer version (6 days old) is the one selected.
    Assert.assertEquals(
        Lists.newArrayList(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2))).get(0),
        version2);

    config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "1h"));
    SelectAfterTimeBasedPolicy policyLookback1Hour = new SelectAfterTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback1Hour.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 0);

    // (Local renamed from the misleading "policyLookback8Days".)
    config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "9d"));
    SelectAfterTimeBasedPolicy policyLookback9Days = new SelectAfterTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback9Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 2);
  }

  /** {@link SelectBeforeTimeBasedPolicy} selects versions OLDER than the look-back window. */
  @Test
  public void testSelectBeforeTimebasedPolicy() {
    Path dummyPath = new Path("dummy");
    DateTime dt1 = new DateTime().minusDays(8);
    DateTime dt2 = new DateTime().minusDays(6);

    Config config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "7d"));
    SelectBeforeTimeBasedPolicy policyLookback7Days = new SelectBeforeTimeBasedPolicy(config);
    TimestampedDatasetVersion version1 = new TimestampedDatasetVersion(dt1, dummyPath);
    TimestampedDatasetVersion version2 = new TimestampedDatasetVersion(dt2, dummyPath);
    Assert.assertEquals(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 1);
    // Only the older version (8 days old) lies before the 7-day boundary.
    Assert.assertEquals(
        Lists.newArrayList(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2))).get(0),
        version1);

    config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "1h"));
    SelectBeforeTimeBasedPolicy policyLookback1Hour = new SelectBeforeTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback1Hour.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 2);

    config =
        ConfigFactory.parseMap(ImmutableMap
            .of(SelectAfterTimeBasedPolicy.TIME_BASED_SELECTION_LOOK_BACK_TIME_KEY, "9d"));
    SelectBeforeTimeBasedPolicy policyLookback9Days = new SelectBeforeTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback9Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 0);
  }

  /** {@link SelectBetweenTimeBasedPolicy} selects versions between a min and a max look-back time. */
  @Test
  public void testSelectBetweenTimebasedPolicy() {
    Path dummyPath = new Path("dummy");
    DateTime dt1 = new DateTime().minusDays(8);
    DateTime dt2 = new DateTime().minusDays(6);

    // Window [4d, 7d]: only the 6-day-old version is inside.
    Config config =
        ConfigFactory.parseMap(ImmutableMap.of(
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY, "7d",
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY, "4d"));
    SelectBetweenTimeBasedPolicy policyLookback7Days = new SelectBetweenTimeBasedPolicy(config);
    TimestampedDatasetVersion version1 = new TimestampedDatasetVersion(dt1, dummyPath);
    TimestampedDatasetVersion version2 = new TimestampedDatasetVersion(dt2, dummyPath);
    Assert.assertEquals(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 1);
    Assert.assertEquals(
        Lists.newArrayList(policyLookback7Days.listSelectedVersions(Lists.newArrayList(version1, version2))).get(0),
        version2);

    // Window [4d, 9d]: both versions are inside.
    config =
        ConfigFactory.parseMap(ImmutableMap.of(
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY, "9d",
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY, "4d"));
    SelectBetweenTimeBasedPolicy policyLookback9d4d = new SelectBetweenTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback9d4d.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 2);

    // Window [1d, 4d]: both versions are too old.
    config =
        ConfigFactory.parseMap(ImmutableMap.of(
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY, "4d",
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MIN_LOOK_BACK_TIME_KEY, "1d"));
    SelectBetweenTimeBasedPolicy policyLookback4d1d = new SelectBetweenTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback4d1d.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 0);

    // No min look-back configured: only the 6-day-old version is selected
    // (presumably the min bound defaults to "now" — see SelectBetweenTimeBasedPolicy).
    config =
        ConfigFactory.parseMap(ImmutableMap.of(
            SelectBetweenTimeBasedPolicy.TIME_BASED_SELECTION_MAX_LOOK_BACK_TIME_KEY, "7d"));
    SelectBetweenTimeBasedPolicy policyLookback7d0d = new SelectBetweenTimeBasedPolicy(config);
    Assert.assertEquals(policyLookback7d0d.listSelectedVersions(Lists.newArrayList(version1, version2)).size(), 1);
    Assert.assertEquals(
        Lists.newArrayList(policyLookback7d0d.listSelectedVersions(Lists.newArrayList(version1, version2))).get(0),
        version2);
  }
}
| 2,376 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util/commit/DeleteFileCommitStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.commit;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import javax.annotation.Nullable;
import lombok.Getter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.data.management.trash.Trash;
import org.apache.gobblin.data.management.trash.TrashFactory;
import org.apache.gobblin.util.PathUtils;
/**
* {@link CommitStep} to delete a set of paths in a {@link FileSystem}.
* If {@link #parentDeletionLimit} is present, will also delete newly empty parent directories up to but not including
* that limit.
*/
@Getter
public class DeleteFileCommitStep implements CommitStep {

  private final Collection<FileStatus> pathsToDelete;
  private final Properties properties;
  private final URI fsUri;
  // If present, newly-empty parent directories are deleted up to (but not including) this path.
  private final Optional<Path> parentDeletionLimit;

  public DeleteFileCommitStep(FileSystem fs, Path path, Properties properties) throws IOException {
    this(fs, Lists.newArrayList(fs.getFileStatus(path)), properties, Optional.<Path>absent());
  }

  /** Creates a step that deletes {@code paths} without cleaning up empty parent directories. */
  public static DeleteFileCommitStep fromPaths(FileSystem fs, Collection<Path> paths, Properties properties) throws
      IOException {
    return new DeleteFileCommitStep(fs, toFileStatus(fs, paths), properties, Optional.<Path>absent());
  }

  /** Creates a step that deletes {@code paths} and empty parents up to (excluding) {@code parentDeletionLimit}. */
  public static DeleteFileCommitStep fromPaths(FileSystem fs, Collection<Path> paths, Properties properties,
      Path parentDeletionLimit) throws IOException {
    return new DeleteFileCommitStep(fs, toFileStatus(fs, paths), properties, Optional.of(parentDeletionLimit));
  }

  /**
   * @param fs {@link FileSystem} where files need to be deleted.
   * @param paths Collection of {@link FileStatus}es to delete.
   * @param properties {@link Properties} object including {@link Trash} configuration.
   * @param parentDeletionLimit if present, will delete empty parent directories up to but not including this path. If
   *                            absent, will not delete empty parent directories.
   * @throws IOException
   */
  public DeleteFileCommitStep(FileSystem fs, Collection<FileStatus> paths, Properties properties,
      Optional<Path> parentDeletionLimit) throws IOException {
    this.fsUri = fs.getUri();
    this.pathsToDelete = paths;
    this.properties = properties;
    this.parentDeletionLimit = parentDeletionLimit;
  }

  /** Resolves each path to its current {@link FileStatus} so later runs can detect modification. */
  private static List<FileStatus> toFileStatus(FileSystem fs, Collection<Path> paths) throws IOException {
    List<FileStatus> fileStatuses = Lists.newArrayList();
    for (Path path : paths) {
      fileStatuses.add(fs.getFileStatus(path));
    }
    return fileStatuses;
  }

  /** The step is complete once none of the recorded files still exist unmodified. */
  @Override public boolean isCompleted() throws IOException {
    for (FileStatus pathToDelete : this.pathsToDelete) {
      if (existsAndIsExpectedFile(pathToDelete)) {
        return false;
      }
    }
    return true;
  }

  /** Moves each still-unmodified file to trash, then optionally prunes newly-empty parent directories. */
  @Override public void execute() throws IOException {
    Trash trash = TrashFactory.createTrash(getFS(), this.properties);
    Set<Path> parents = Sets.newHashSet();
    for (FileStatus pathToDelete : this.pathsToDelete) {
      if (existsAndIsExpectedFile(pathToDelete)) {
        trash.moveToTrash(pathToDelete.getPath());
        parents.add(pathToDelete.getPath().getParent());
      }
    }
    if (this.parentDeletionLimit.isPresent()) {
      for (Path parent : parents) {
        PathUtils.deleteEmptyParentDirectories(getFS(), this.parentDeletionLimit.get(), parent);
      }
    }
  }

  /**
   * Checks whether existing file in filesystem is the expected file (compares length and modification time).
   */
  private boolean existsAndIsExpectedFile(FileStatus status) throws IOException {
    if (!getFS().exists(status.getPath())) {
      return false;
    }
    FileStatus currentFileStatus = getFS().getFileStatus(status.getPath());
    // A different length or a newer modification time means the file was replaced since this step was created.
    return currentFileStatus.getLen() == status.getLen()
        && currentFileStatus.getModificationTime() <= status.getModificationTime();
  }

  private FileSystem getFS() throws IOException {
    return FileSystem.get(this.fsUri, new Configuration());
  }

  @Override
  public String toString() {
    // Lambda replaces the verbose anonymous com.google.common.base.Function implementation.
    return String.format("Delete the following files at %s: %s", this.fsUri,
        Iterables.toString(Iterables.transform(this.pathsToDelete,
            input -> input != null ? input.getPath() : null)));
  }
}
| 2,377 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util/commit/SetPermissionCommitStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.commit;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.data.management.copy.OwnerAndPermission;
/**
* An implementation of {@link CommitStep} for setting any file permissions.
* Current implementation only sets permissions, but it is capable of setting owner and group as well.
*/
@Slf4j
public class SetPermissionCommitStep implements CommitStep {
Map<String, OwnerAndPermission> pathAndPermissions;
private final URI fsUri;
public final boolean stopOnError;
public static final String STOP_ON_ERROR_KEY = "stop.on.error";
public static final String DEFAULT_STOP_ON_ERROR = "false";
private boolean isCompleted = false;
public SetPermissionCommitStep(FileSystem targetFs, Map<String, OwnerAndPermission> pathAndPermissions,
Properties props) {
this.pathAndPermissions = pathAndPermissions;
this.fsUri = targetFs.getUri();
this.stopOnError = Boolean.parseBoolean(props.getProperty(STOP_ON_ERROR_KEY, DEFAULT_STOP_ON_ERROR));
}
@Override
public boolean isCompleted() throws IOException {
return isCompleted;
}
  /**
   * Applies the configured permission to every entry in {@link #pathAndPermissions} on the filesystem
   * identified by {@code fsUri}. {@link AccessControlException}s are logged; when {@code stopOnError} is
   * true the first such error aborts the remaining updates.
   *
   * NOTE(review): {@code isCompleted} is set to true even when the loop is aborted by stopOnError, so a
   * partially-applied run still reports completed — confirm this is intended.
   *
   * @throws IOException on filesystem errors other than {@link AccessControlException}
   */
  @Override
  public void execute() throws IOException {
    FileSystem fs = FileSystem.get(this.fsUri, new Configuration());
    for (Map.Entry<String, OwnerAndPermission> entry : pathAndPermissions.entrySet()) {
      Path path = new Path(entry.getKey());
      try {
        log.info("Setting permission {} on path {}", entry.getValue().getFsPermission(), path);
        fs.setPermission(path, entry.getValue().getFsPermission());
        // TODO : we can also set owner and group here.
      } catch (AccessControlException e) {
        // Best effort: permission failures are logged rather than propagated.
        log.warn("Error while setting permission on " + path, e);
        if (this.stopOnError) {
          log.info("Skip setting rest of the permissions because stopOnError is true.");
          break;
        }
      }
    }
    isCompleted = true;
  }
} | 2,378 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util/request_allocation/SimpleHiveDatasetTieringPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.request_allocation;
import java.io.IOException;
import java.io.Serializable;
import java.util.Comparator;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.collect.Maps;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.copy.AllEqualComparator;
import org.apache.gobblin.data.management.copy.CopyEntity;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.data.management.copy.prioritization.FileSetComparator;
import org.apache.gobblin.data.management.partition.CopyableDatasetRequestor;
import org.apache.gobblin.data.management.partition.FileSet;
import org.apache.gobblin.dataset.Dataset;
import lombok.AllArgsConstructor;
/**
* A simple {@link FileSetComparator} for {@link HiveDataset}s that allows classifying datasets into tiers, so that lower
* tiers are higher priority.
*
* Usage:
* {@link #TIER_KEY}.<tier-number>=<whitelist-blacklist-pattern>
* Example:
* {@link #TIER_KEY}.0 = importantdb
* {@link #TIER_KEY}.1 = otherdb,thirddb
*/
@Alias(value = "HiveSimpleTiering")
public class SimpleHiveDatasetTieringPrioritizer extends SimpleHierarchicalPrioritizer<FileSet<CopyEntity>>
    implements FileSetComparator, Serializable {

  public static final String CONFIGURATION_PREFIX = "gobblin.prioritizer.hiveDatasetTiering";
  public static final String TIER_KEY = CONFIGURATION_PREFIX + ".tier";
  private static final Pattern TIER_PATTERN = Pattern.compile(TIER_KEY + "\\.([0-9]+)");

  public SimpleHiveDatasetTieringPrioritizer(Properties properties) throws IOException {
    super(SimpleHiveDatasetTieringPrioritizer.createRequestorComparator(properties),
        new AllEqualComparator<FileSet<CopyEntity>>());
  }

  /**
   * Scans {@code props} for keys of the form {@code <TIER_KEY>.<n>} and builds a comparator that orders
   * requestors by the lowest tier number whose whitelist accepts their Hive table.
   */
  private static Comparator<Requestor<FileSet<CopyEntity>>> createRequestorComparator(Properties props)
      throws IOException {
    TreeMap<Integer, WhitelistBlacklist> tiersByNumber = Maps.newTreeMap();
    for (Map.Entry<Object, Object> propEntry : props.entrySet()) {
      if (!(propEntry.getKey() instanceof String) || !(propEntry.getValue() instanceof String)) {
        continue;
      }
      Matcher tierMatcher = TIER_PATTERN.matcher((String) propEntry.getKey());
      if (tierMatcher.matches()) {
        // Captured group is the tier number; the value is a whitelist pattern (empty blacklist).
        tiersByNumber.put(Integer.parseInt(tierMatcher.group(1)),
            new WhitelistBlacklist((String) propEntry.getValue(), ""));
      }
    }
    return new TierComparator(tiersByNumber);
  }

  /** Orders requestors by tier number; datasets matching no tier sort last. */
  @AllArgsConstructor
  private static class TierComparator implements Comparator<Requestor<FileSet<CopyEntity>>>, Serializable {
    private final TreeMap<Integer, WhitelistBlacklist> tiersMap;

    @Override
    public int compare(Requestor<FileSet<CopyEntity>> o1, Requestor<FileSet<CopyEntity>> o2) {
      return Integer.compare(findTier(o1), findTier(o2));
    }

    private int findTier(Requestor<FileSet<CopyEntity>> requestor) {
      if (!(requestor instanceof CopyableDatasetRequestor)) {
        throw new ClassCastException(String.format("%s can only be used for %s.",
            SimpleHiveDatasetTieringPrioritizer.class.getName(), CopyableDatasetRequestor.class.getName()));
      }
      Dataset dataset = ((CopyableDatasetRequestor) requestor).getDataset();
      if (!(dataset instanceof HiveDataset)) {
        throw new ClassCastException(String.format("%s can only be used for %s.",
            SimpleHiveDatasetTieringPrioritizer.class.getName(), HiveDataset.class.getName()));
      }
      HiveDataset hiveDataset = (HiveDataset) dataset;
      // The map is sorted, so the first accepting tier is the lowest (highest-priority) one.
      for (Map.Entry<Integer, WhitelistBlacklist> tierEntry : tiersMap.entrySet()) {
        if (tierEntry.getValue()
            .acceptTable(hiveDataset.getTable().getDbName(), hiveDataset.getTable().getTableName())) {
          return tierEntry.getKey();
        }
      }
      return Integer.MAX_VALUE;
    }
  }
}
| 2,379 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util/schema_check/AvroSchemaCheckStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.schema_check;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* The strategy to compare Avro schema.
*/
public interface AvroSchemaCheckStrategy {
/**
* A factory to initiate the Strategy
*/
@Slf4j
class AvroSchemaCheckStrategyFactory {
/**
* Use the configuration to create a schema check strategy. If it's not found, return null.
* @param state
* @return
*/
public static AvroSchemaCheckStrategy create(WorkUnitState state)
{
try {
return (AvroSchemaCheckStrategy) Class.forName(state.getProp(ConfigurationKeys.AVRO_SCHEMA_CHECK_STRATEGY, ConfigurationKeys.AVRO_SCHEMA_CHECK_STRATEGY_DEFAULT)).newInstance();
} catch (Exception e) {
log.error(e.getMessage());
return null;
}
}
}
/**
* Make sure schema toValidate and expected have matching names and types.
* @param toValidate The real schema
* @param expected The expected schema
* @return
*/
boolean compare(Schema expected, Schema toValidate);
}
| 2,380 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/util/schema_check/AvroSchemaCheckDefaultStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.schema_check;
import java.util.HashSet;
import java.util.Set;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
/**
* default strategy to check the compatibility of avro schema
*/
public class AvroSchemaCheckDefaultStrategy implements AvroSchemaCheckStrategy {
  /**
   * Recursively compares the names and types of the two schemas.
   *
   * All recursive calls now pass arguments in the declared (expected, toValidate) order; the original
   * swapped them for ARRAY/MAP/RECORD, which was confusing even though the checks are symmetric.
   *
   * @param expected The expected schema
   * @param toValidate The real schema
   * @return true when expected schema and toValidate schema have matching field names and types
   */
  public boolean compare(Schema expected, Schema toValidate)
  {
    // Type and name must match at every level before any structural comparison.
    if (toValidate.getType() != expected.getType() || !toValidate.getName().equals(expected.getName())) {
      return false;
    }
    switch (toValidate.getType()) {
      case NULL:
      case BOOLEAN:
      case INT:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case BYTES:
      case STRING: {
        // Primitive types: matching type is sufficient.
        return true;
      }
      case ARRAY: {
        return compare(expected.getElementType(), toValidate.getElementType());
      }
      case MAP: {
        return compare(expected.getValueType(), toValidate.getValueType());
      }
      case FIXED: {
        // Fixed size must match (names already compared above).
        return toValidate.getFixedSize() == expected.getFixedSize();
      }
      case ENUM: {
        // Same number of symbols and the expected set contains them all => the symbol sets are equal.
        final Set<String> expectedSymbols = new HashSet<>(expected.getEnumSymbols());
        final Set<String> toValidateSymbols = new HashSet<>(toValidate.getEnumSymbols());
        return expectedSymbols.size() == toValidateSymbols.size()
            && expectedSymbols.containsAll(toValidateSymbols);
      }
      case RECORD: {
        // Every expected field must exist (by name) in toValidate with a matching schema.
        if (toValidate.getFields().size() != expected.getFields().size()) {
          return false;
        }
        for (final Schema.Field expectedField : expected.getFields()) {
          final Schema.Field toValidateField = toValidate.getField(expectedField.name());
          if (toValidateField == null || !compare(expectedField.schema(), toValidateField.schema())) {
            return false;
          }
        }
        return true;
      }
      case UNION: {
        // Greedy bipartite match: each toValidate branch must match a distinct expected branch.
        if (toValidate.getTypes().size() != expected.getTypes().size()) {
          return false;
        }
        HashSet<Schema> unmatchedExpectedTypes = new HashSet<Schema>(expected.getTypes());
        for (Schema toValidateType : toValidate.getTypes()) {
          Schema equalSchema = null;
          for (Schema candidate : unmatchedExpectedTypes) {
            if (compare(candidate, toValidateType)) {
              equalSchema = candidate;
              break;
            }
          }
          if (equalSchema == null) {
            return false;
          }
          unmatchedExpectedTypes.remove(equalSchema);
        }
        return true;
      }
      default: {
        throw new AvroRuntimeException("Unknown schema type: " + toValidate.getType());
      }
    }
  }
}
| 2,381 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/retention/DatasetCleanerTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.retention;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.BaseAbstractTask;
import org.apache.gobblin.util.JobConfigurationUtils;
/**
* A task that runs a DatasetCleaner job.
*/
@Slf4j
public class DatasetCleanerTask extends BaseAbstractTask {
  private static final String JOB_CONFIGURATION_PREFIX = "datasetCleaner";

  private final TaskContext taskContext;

  public DatasetCleanerTask(TaskContext taskContext) {
    super(taskContext);
    this.taskContext = taskContext;
  }

  /**
   * Runs a {@link DatasetCleaner} configured from the task state; marks the task SUCCESSFUL on
   * completion or FAILED (and rethrows) on IO errors. The cleaner is always closed.
   */
  @Override
  public void run(){
    DatasetCleaner cleaner = null;
    try {
      Configuration conf = new Configuration();
      JobConfigurationUtils.putStateIntoConfiguration(this.taskContext.getTaskState(), conf);
      cleaner = new DatasetCleaner(FileSystem.get(conf), this.taskContext.getTaskState().getProperties());
      cleaner.clean();
      this.workingState = WorkUnitState.WorkingState.SUCCESSFUL;
    } catch (IOException e) {
      this.workingState = WorkUnitState.WorkingState.FAILED;
      throw new RuntimeException(e);
    } finally {
      closeCleaner(cleaner);
    }
  }

  /** Closes the cleaner if it was created, converting close failures into runtime exceptions. */
  private static void closeCleaner(DatasetCleaner cleaner) {
    if (cleaner == null) {
      return;
    }
    try {
      cleaner.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public void commit() {
    log.info("task {} commits with state {}", this.taskContext.getTaskState().getTaskId(), this.workingState);
  }
}
| 2,382 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/retention/DatasetCleanerTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.retention;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.NoopPublisher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
/**
* A {@link TaskFactory} that creates a {@link DatasetCleanerTask}. There is no data publish phase for this task, so this
* factory uses a {@link NoopPublisher}.
*/
public class DatasetCleanerTaskFactory implements TaskFactory {
  /** Creates a {@link DatasetCleanerTask} bound to the given task context. */
  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    return new DatasetCleanerTask(taskContext);
  }

  /** Retention has no publish phase, so a {@link NoopPublisher} is returned. */
  @Override
  public DataPublisher createDataPublisher(JobState.DatasetState datasetState) {
    return new NoopPublisher(datasetState);
  }
}
| 2,383 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/retention/DatasetCleanerCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.retention;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.data.management.retention.DatasetCleaner;
import org.apache.gobblin.runtime.cli.CliApplication;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Properties;
@Alias(value = "cleaner", description = "Data retention utility")
public class DatasetCleanerCli implements CliApplication {

  private static final Option CLEANER_CONFIG =
      Option.builder("c").longOpt("config").hasArg().required().desc("DatasetCleaner configuration").build();

  /**
   * Parses the config file location from {@code args}, runs a {@link DatasetCleaner} against the
   * default filesystem, and always closes the cleaner.
   */
  @Override
  public void run(String[] args) {
    DatasetCleaner datasetCleaner = null;
    try {
      Properties properties = readProperties(parseConfigLocation(args));
      datasetCleaner = new DatasetCleaner(FileSystem.get(new Configuration()), properties);
      datasetCleaner.clean();
    } catch (IOException e) {
      throw new RuntimeException(e);
    } finally {
      if (datasetCleaner != null) {
        try {
          datasetCleaner.close();
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
  }

  /** Loads {@link Properties} from the given file. */
  private Properties readProperties(String fileLocation) {
    // try-with-resources guarantees the stream is closed even when load() throws
    // (the original leaked the FileInputStream on a load failure).
    try (FileInputStream input = new FileInputStream(fileLocation)) {
      Properties prop = new Properties();
      prop.load(input);
      return prop;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /** Extracts the required -c/--config option value; args[0] is the CLI alias and is skipped. */
  private String parseConfigLocation(String[] args) {
    Options options = new Options();
    options.addOption(CLEANER_CONFIG);
    CommandLine cli;
    try {
      CommandLineParser parser = new DefaultParser();
      cli = parser.parse(options, Arrays.copyOfRange(args, 1, args.length));
    } catch (ParseException pe) {
      System.out.println("Command line parse exception: " + pe.getMessage());
      printUsage(options);
      throw new RuntimeException(pe);
    }
    return cli.getOptionValue(CLEANER_CONFIG.getOpt());
  }

  /** Prints option help to stdout. */
  private void printUsage(Options options) {
    HelpFormatter formatter = new HelpFormatter();
    String usage = "DatasetCleaner configuration ";
    formatter.printHelp(usage, options);
  }
}
| 2,384 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.embedded;
import java.io.IOException;
import java.net.URISyntaxException;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.data.management.copy.CopySource;
import org.apache.gobblin.data.management.copy.RecursiveCopyableDataset;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.cli.CliObjectOption;
import org.apache.gobblin.runtime.cli.NotOnCli;
import org.apache.gobblin.runtime.cli.PublicMethodsGobblinCliFactory;
import org.apache.gobblin.runtime.template.ResourceBasedJobTemplate;
/**
 * Embedded version of distcp for copying between Hadoop-compatible file systems.
 *
 * <p>Usage:
 * {@code new EmbeddedGobblinDistcp(new Path("/source"), new Path("/dest")).run();}
 *
 * <p>NOTE(review): public methods on this class appear to be surfaced as CLI options through
 * {@link PublicMethodsGobblinCliFactory} — confirm before renaming or adding public methods.
 */
public class EmbeddedGobblinDistcp extends EmbeddedGobblin {

  /** CLI factory registering this class under the alias {@code distcp}. */
  @Alias(value = "distcp", description = "Distributed copy between Hadoop compatibly file systems.")
  public static class CliFactory extends PublicMethodsGobblinCliFactory {
    public CliFactory() {
      super(EmbeddedGobblinDistcp.class);
    }

    /**
     * Builds the embedded job from the two positional CLI arguments: source path and target path.
     * @throws RuntimeException if the argument count is not exactly two
     */
    @Override
    public EmbeddedGobblin constructEmbeddedGobblin(CommandLine cli) throws JobTemplate.TemplateException, IOException {
      String[] leftoverArgs = cli.getArgs();
      if (leftoverArgs.length != 2) {
        throw new RuntimeException("Unexpected number of arguments.");
      }
      Path from = new Path(leftoverArgs[0]);
      Path to = new Path(leftoverArgs[1]);
      return new EmbeddedGobblinDistcp(from, to);
    }

    @Override
    public String getUsageString() {
      return "[OPTIONS] <source> <target>";
    }
  }

  // For backward-compatibility, default to distcp.template
  public EmbeddedGobblinDistcp(Path from, Path to) throws JobTemplate.TemplateException, IOException {
    this("templates/distcp.template", from, to);
  }

  /**
   * Creates an embedded distcp job based on the given job template resource.
   *
   * @param templateLoc classpath location of the job template to load
   * @param from source path to copy from
   * @param to target path to copy to
   */
  public EmbeddedGobblinDistcp(String templateLoc, Path from, Path to) throws JobTemplate.TemplateException, IOException {
    super("Distcp");
    try {
      setTemplate(ResourceBasedJobTemplate.forResourcePath(templateLoc));
    } catch (URISyntaxException | SpecNotFoundException exc) {
      throw new RuntimeException("Could not instantiate an " + EmbeddedGobblinDistcp.class.getName(), exc);
    }
    this.setConfiguration("from", from.toString());
    this.setConfiguration("to", to.toString());
    // Infer source and target fs uris from the input paths
    this.setConfiguration(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, from.getFileSystem(new Configuration()).getUri().toString());
    this.setConfiguration(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, to.getFileSystem(new Configuration()).getUri().toString());
    // add gobblin-data-management jar to distributed jars
    this.distributeJarByClassWithPriority(CopySource.class, 0);
  }

  /**
   * Specifies that files in the target should be updated if they have changed in the source. Equivalent to -update
   * option in Hadoop distcp.
   */
  @CliObjectOption(description = "Specifies files should be updated if they're different in the source.")
  public EmbeddedGobblinDistcp update() {
    this.setConfiguration(RecursiveCopyableDataset.UPDATE_KEY, Boolean.toString(true));
    return this;
  }

  /**
   * Specifies that files in the target that don't exist in the source should be deleted. Equivalent to -delete
   * option in Hadoop distcp.
   */
  @CliObjectOption(description = "Delete files in target that don't exist on source.")
  public EmbeddedGobblinDistcp delete() {
    this.setConfiguration(RecursiveCopyableDataset.DELETE_KEY, Boolean.toString(true));
    return this;
  }

  /**
   * If {@link #delete()} is used, specifies that newly empty parent directories should also be deleted.
   */
  @CliObjectOption(description = "If deleting files on target, also delete newly empty parent directories.")
  public EmbeddedGobblinDistcp deleteEmptyParentDirectories() {
    this.setConfiguration(RecursiveCopyableDataset.DELETE_EMPTY_DIRECTORIES_KEY, Boolean.toString(true));
    return this;
  }

  /**
   * Run in simulate mode. Will log everything it would copy, but not actually copy anything.
   */
  public EmbeddedGobblinDistcp simulate() {
    this.setConfiguration(CopySource.SIMULATE, Boolean.toString(true));
    return this;
  }

  // Remove template from CLI: the template is fixed by the constructor and must not be overridden
  // via a command-line option.
  @Override
  @NotOnCli
  public EmbeddedGobblin setTemplate(String templateURI)
      throws URISyntaxException, SpecNotFoundException, JobTemplate.TemplateException {
    return super.setTemplate(templateURI);
  }
}
| 2,385 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/RetentionEvents.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
/**
 * Holds event names and constants used in events submitted by a retention job.
 *
 * <p>Pure constant holder — not meant to be instantiated.</p>
 */
class RetentionEvents {

  /**
   * This event is submitted when {@link CleanableDataset#clean()} throws an exception
   */
  static class CleanFailed {
    static final String EVENT_NAME = "CleanFailed";
    /**
     * Value for this key will be a stacktrace of any exception caused while deleting a dataset
     */
    static final String FAILURE_CONTEXT_METADATA_KEY = "failureContext";

    private CleanFailed() {
      // constant holder; no instances
    }
  }

  /** Namespace under which all retention events are submitted. */
  static final String NAMESPACE = "gobblin.data.management.retention";
  /** Metadata key carrying the URN of the dataset the event refers to. */
  static final String DATASET_URN_METADATA_KEY = "datasetUrn";

  private RetentionEvents() {
    // constant holder; no instances
  }
}
| 2,386 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/DatasetCleaner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.profile.MultiCleanableDatasetFinder;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.util.AzkabanTags;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.RateControlledFileSystem;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
/**
 * Finds existing versions of datasets and cleans old or deprecated versions.
 *
 * <p>
 * Datasets are discovered via a {@link MultiCleanableDatasetFinder}; every discovered
 * {@link CleanableDataset} is cleaned asynchronously on a scaling thread pool. Success and
 * failure are reported through metrics and {@link EventSubmitter} events. {@link #close()}
 * blocks until all in-flight cleans finish and throws if any dataset failed to clean.
 * </p>
 */
public class DatasetCleaner implements Instrumentable, Closeable {

  public static final String CONFIGURATION_KEY_PREFIX = "gobblin.retention.";
  /** Upper bound on the number of datasets cleaned concurrently (thread pool max size). */
  public static final String MAX_CONCURRENT_DATASETS_CLEANED =
      CONFIGURATION_KEY_PREFIX + "max.concurrent.datasets.cleaned";
  /** Optional HDFS rate limit; when set, the target fs is wrapped in a {@link RateControlledFileSystem}. */
  public static final String DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT =
      CONFIGURATION_KEY_PREFIX + "hdfs.calls.per.second.limit";
  public static final String DEFAULT_MAX_CONCURRENT_DATASETS_CLEANED = "100";

  private static final Logger LOG = LoggerFactory.getLogger(DatasetCleaner.class);

  private final DatasetsFinder<Dataset> datasetFinder;
  private final ListeningExecutorService service;
  private final Closer closer;
  private final boolean isMetricEnabled;
  private MetricContext metricContext;
  private final EventSubmitter eventSubmitter;
  private Optional<Meter> datasetsCleanSuccessMeter = Optional.absent();
  private Optional<Meter> datasetsCleanFailureMeter = Optional.absent();
  // Counted down once per dataset (success or failure). Remains null until clean() runs,
  // which is why close() null-checks it before awaiting.
  private Optional<CountDownLatch> finishCleanSignal;
  // Failures collected from async clean tasks; inspected in close().
  private final List<Throwable> throwables;

  /**
   * @param fs default {@link FileSystem} to clean on, used unless a writer fs URI is configured
   * @param props job configuration; see the {@code gobblin.retention.*} keys on this class
   * @throws IOException on malformed rate-limit values or failure to construct the dataset finder
   */
  public DatasetCleaner(FileSystem fs, Properties props) throws IOException {
    Properties properties = new Properties();
    properties.putAll(props);

    // Load dynamic configuration and overlay it onto the job properties.
    Config propsAsConfig = ConfigUtils.propertiesToConfig(props);
    DynamicConfigGenerator dynamicConfigGenerator =
        DynamicConfigGeneratorFactory.createDynamicConfigGenerator(propsAsConfig);
    Config dynamicConfig = dynamicConfigGenerator.generateDynamicConfig(propsAsConfig);
    properties.putAll(ConfigUtils.configToProperties(dynamicConfig));

    State state = new State(properties);
    // Honor an explicitly configured writer filesystem; otherwise clean on the supplied fs.
    FileSystem targetFs =
        properties.containsKey(ConfigurationKeys.WRITER_FILE_SYSTEM_URI) ? WriterUtils.getWriterFs(state) : fs;

    this.closer = Closer.create();
    // TODO -- Remove the dependency on gobblin-core after new Gobblin Metrics does not depend on gobblin-core.
    List<Tag<?>> tags = Lists.newArrayList();
    tags.addAll(Tag.fromMap(AzkabanTags.getAzkabanTags()));
    this.metricContext =
        this.closer.register(Instrumented.getMetricContext(state, DatasetCleaner.class, tags));
    this.isMetricEnabled = GobblinMetrics.isEnabled(properties);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, RetentionEvents.NAMESPACE).build();

    try {
      FileSystem optionalRateControlledFs = targetFs;
      // BUGFIX: was properties.contains(...), which is Hashtable.contains and searches *values*,
      // so the rate limit was never applied. containsKey checks for the property name.
      if (properties.containsKey(DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT)) {
        optionalRateControlledFs = this.closer.register(new RateControlledFileSystem(targetFs,
            Long.parseLong(properties.getProperty(DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT))));
        ((RateControlledFileSystem) optionalRateControlledFs).startRateControl();
      }
      this.datasetFinder = new MultiCleanableDatasetFinder(optionalRateControlledFs, properties, eventSubmitter);
    } catch (NumberFormatException | ExecutionException exception) {
      throw new IOException(exception);
    }

    ExecutorService executor = ScalingThreadPoolExecutor.newScalingThreadPool(0,
        Integer.parseInt(properties.getProperty(MAX_CONCURRENT_DATASETS_CLEANED, DEFAULT_MAX_CONCURRENT_DATASETS_CLEANED)),
        100, ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("Dataset-cleaner-pool-%d")));
    this.service = ExecutorsUtils.loggingDecorator(executor);
    this.throwables = Lists.newArrayList();
  }

  /**
   * Perform the cleanup of old / deprecated dataset versions.
   *
   * <p>Submits one asynchronous task per dataset; call {@link #close()} to wait for completion
   * and surface any failures.</p>
   *
   * @throws IOException if dataset discovery fails
   */
  public void clean() throws IOException {
    List<Dataset> dataSets = this.datasetFinder.findDatasets();
    this.finishCleanSignal = Optional.of(new CountDownLatch(dataSets.size()));
    for (final Dataset dataset : dataSets) {
      ListenableFuture<Void> future = this.service.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          // Non-cleanable datasets are a no-op; the callback still counts them down.
          if (dataset instanceof CleanableDataset) {
            ((CleanableDataset) dataset).clean();
          }
          return null;
        }
      });
      Futures.addCallback(future, new FutureCallback<Void>() {
        @Override
        public void onFailure(Throwable throwable) {
          DatasetCleaner.this.finishCleanSignal.get().countDown();
          LOG.warn("Exception caught when cleaning " + dataset.datasetURN() + ".", throwable);
          DatasetCleaner.this.throwables.add(throwable);
          Instrumented.markMeter(DatasetCleaner.this.datasetsCleanFailureMeter);
          // Emit a CleanFailed event carrying the dataset URN and the full stack trace.
          DatasetCleaner.this.eventSubmitter.submit(RetentionEvents.CleanFailed.EVENT_NAME,
              ImmutableMap.of(RetentionEvents.CleanFailed.FAILURE_CONTEXT_METADATA_KEY,
                  ExceptionUtils.getFullStackTrace(throwable), RetentionEvents.DATASET_URN_METADATA_KEY,
                  dataset.datasetURN()));
        }

        @Override
        public void onSuccess(Void arg0) {
          DatasetCleaner.this.finishCleanSignal.get().countDown();
          LOG.info("Successfully cleaned: " + dataset.datasetURN());
          Instrumented.markMeter(DatasetCleaner.this.datasetsCleanSuccessMeter);
        }
      });
    }
  }

  /**
   * Waits for all submitted clean tasks, then shuts down the executor and closes registered
   * resources. Throws a {@link RuntimeException} if any dataset failed to clean.
   */
  @Override
  public void close() throws IOException {
    try {
      if (this.finishCleanSignal != null && this.finishCleanSignal.isPresent()) {
        this.finishCleanSignal.get().await();
      }
      if (!this.throwables.isEmpty()) {
        for (Throwable t : this.throwables) {
          LOG.error("Failed clean due to ", t);
        }
        throw new RuntimeException("Clean failed for one or more datasets");
      }
    } catch (InterruptedException e) {
      // Re-assert the interrupt flag before converting to IOException.
      Thread.currentThread().interrupt();
      throw new IOException("Not all datasets finished cleaning", e);
    } finally {
      ExecutorsUtils.shutdownExecutorService(this.service, Optional.of(LOG));
      this.closer.close();
    }
  }

  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return this.isMetricEnabled;
  }

  /** Default with no additional tags */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }

  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    this.metricContext = this.closer.register(
        Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String>absent()));
    this.regenerateMetrics();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    this.regenerateMetrics();
  }

  /**
   * Generates metrics for the instrumentation of this class.
   */
  protected void regenerateMetrics() {
    if (isInstrumentationEnabled()) {
      this.datasetsCleanFailureMeter =
          Optional.of(this.metricContext.meter(DatasetCleanerMetrics.DATASETS_CLEAN_FAILURE));
      this.datasetsCleanSuccessMeter =
          Optional.of(this.metricContext.meter(DatasetCleanerMetrics.DATASETS_CLEAN_SUCCESS));
    }
  }

  /** Metric names emitted by {@link DatasetCleaner}. */
  public static class DatasetCleanerMetrics {
    public static final String DATASETS_CLEAN_SUCCESS = "gobblin.retention.datasets.clean.success";
    public static final String DATASETS_CLEAN_FAILURE = "gobblin.retention.datasets.clean.failure";
  }
}
| 2,387 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/CleanableHiveDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigRenderOptions;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.policy.SelectBeforeTimeBasedPolicy;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.retention.version.HiveDatasetVersionCleaner;
import org.apache.gobblin.data.management.version.HiveDatasetVersion;
import org.apache.gobblin.data.management.version.finder.AbstractHiveDatasetVersionFinder;
import org.apache.gobblin.data.management.version.finder.DatePartitionHiveVersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * <p>
 * A {@link HiveDataset} used for Retention. A {@link HiveDataset} represents a hive table and a {@link HiveDatasetVersion}
 * represents a hive partition of this table.
 * </p>
 *
 * <ul>
 * <li>A version finder at {@value #VERSION_FINDER_CLASS_KEY} is used to find all the partitions the dataset
 * <li>A selection policy at {@value #SELECTION_POLICY_CLASS_KEY} is applied on all these partitions to get the partitions to be deleted.
 * <li>These selected partitions are dropped in the hive metastore and all the data on FileSystem is also deleted
 * </ul>
 *
 */
@Slf4j
@SuppressWarnings({ "rawtypes", "unchecked" })
@Getter
public class CleanableHiveDataset extends HiveDataset implements CleanableDataset, FileSystemDataset {

  // Whether the on-FileSystem data of a dropped partition should also be deleted (default: false).
  private static final String SHOULD_DELETE_DATA_KEY = "gobblin.retention.hive.shouldDeleteData";
  private static final String SHOULD_DELETE_DATA_DEFAULT = Boolean.toString(false);

  // Dataset-config key naming the version finder implementation; defaults to date-partition based.
  private static final String VERSION_FINDER_CLASS_KEY = "version.finder.class";
  private static final String DEFAULT_VERSION_FINDER_CLASS = DatePartitionHiveVersionFinder.class.getName();

  // Dataset-config key naming the selection policy; defaults to selecting versions before a cutoff time.
  private static final String SELECTION_POLICY_CLASS_KEY = "selection.policy.class";
  private static final String DEFAULT_SELECTION_POLICY_CLASS = SelectBeforeTimeBasedPolicy.class.getName();

  private final VersionSelectionPolicy hiveSelectionPolicy;
  private final AbstractHiveDatasetVersionFinder hiveDatasetVersionFinder;
  private final boolean simulate;
  private final boolean shouldDeleteData;
  private final FsCleanableHelper fsCleanableHelper;

  /**
   * Builds the dataset, instantiating the configured selection policy and version finder
   * reflectively.
   *
   * <p>Each reflective instantiation tries several constructor signatures in order via
   * {@link GobblinConstructorUtils#invokeFirstConstructor}: the policy with
   * {@code (Config, Properties)}, then {@code (Config)}, then {@code (Properties)}; the version
   * finder with {@code (FileSystem, Config)} then {@code (FileSystem, Properties)}. The argument
   * list order is significant — do not reorder.</p>
   *
   * @throws IllegalArgumentException if either configured class cannot be found or instantiated
   */
  public CleanableHiveDataset(FileSystem fs, HiveMetastoreClientPool clientPool, Table table, Properties jobProps,
      Config config) throws IOException {
    super(fs, clientPool, table, jobProps, config);
    try {
      this.hiveSelectionPolicy =
          (VersionSelectionPolicy) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(ConfigUtils.getString(
              this.datasetConfig, SELECTION_POLICY_CLASS_KEY, DEFAULT_SELECTION_POLICY_CLASS)), ImmutableList.<Object> of(
              this.datasetConfig, jobProps), ImmutableList.<Object> of(this.datasetConfig), ImmutableList.<Object> of(jobProps));

      log.info(String.format("Configured selection policy %s for dataset:%s with config %s",
          ConfigUtils.getString(this.datasetConfig, SELECTION_POLICY_CLASS_KEY, DEFAULT_SELECTION_POLICY_CLASS),
          datasetURN(), this.datasetConfig.root().render(ConfigRenderOptions.concise())));

      this.hiveDatasetVersionFinder =
          (AbstractHiveDatasetVersionFinder) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(ConfigUtils
              .getString(this.datasetConfig, VERSION_FINDER_CLASS_KEY, DEFAULT_VERSION_FINDER_CLASS)), ImmutableList
              .<Object> of(this.fs, this.datasetConfig), ImmutableList.<Object> of(this.fs, jobProps));
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      log.error("Failed to instantiate CleanableHiveDataset", e);
      throw new IllegalArgumentException(e);
    }

    this.fsCleanableHelper = new FsCleanableHelper(fs, jobProps, this.datasetConfig, log);
    this.shouldDeleteData = Boolean.valueOf(jobProps.getProperty(SHOULD_DELETE_DATA_KEY, SHOULD_DELETE_DATA_DEFAULT));
    this.simulate = Boolean.valueOf(jobProps.getProperty(FsCleanableHelper.SIMULATE_KEY, FsCleanableHelper.SIMULATE_DEFAULT));
  }

  /**
   * Drops the partitions selected by {@link #hiveSelectionPolicy}. Also deletes the data associated with it.
   * <p>
   * If an {@link Exception} occurs while processing a {@link Partition}, other {@link Partition}s will still be deleted.
   * However, a {@link RuntimeException} is thrown at the end if there was at least one {@link Exception}.
   * </p>
   */
  @Override
  public void clean() throws IOException {

    // Raw List on purpose: versions are sorted via their natural ordering below.
    List versions = Lists.newArrayList(this.hiveDatasetVersionFinder.findDatasetVersions(this));

    if (versions.isEmpty()) {
      log.warn(String.format("No dataset version can be found. Ignoring %s", this.getTable().getCompleteName()));
      return;
    }

    // Newest first, so the selection policy sees versions in reverse chronological order.
    Collections.sort(versions, Collections.reverseOrder());

    Collection<HiveDatasetVersion> deletableVersions = this.hiveSelectionPolicy.listSelectedVersions(versions);

    log.info(String.format("Cleaning dataset %s .Will drop %s out of %s partitions.", datasetURN(), deletableVersions.size(),
        versions.size()));

    // Collect failures so one bad partition does not stop the rest from being cleaned.
    List<Exception> exceptions = Lists.newArrayList();

    for (HiveDatasetVersion hiveDatasetVersion : deletableVersions) {
      try {
        // Initialize the version cleaner
        HiveDatasetVersionCleaner hiveDatasetVersionCleaner = new HiveDatasetVersionCleaner(hiveDatasetVersion, this);

        // Perform pre-clean actions
        hiveDatasetVersionCleaner.preCleanAction();

        // Perform actual cleaning
        hiveDatasetVersionCleaner.clean();

        // Perform post-clean actions eg. swap partitions
        hiveDatasetVersionCleaner.postCleanAction();
      } catch (IOException e) {
        exceptions.add(e);
      }
    }

    // Fail the overall clean if any individual partition failed (details were logged per version).
    if (!exceptions.isEmpty()) {
      throw new RuntimeException(String.format("Deletion failed for %s partitions", exceptions.size()));
    }
  }

  /** The dataset root is the table's data location on the FileSystem. */
  @Override
  public Path datasetRoot() {
    return super.getTable().getDataLocation();
  }
}
| 2,388 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/MultiVersionCleanableDatasetBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.Singular;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.data.management.policy.EmbeddedRetentionSelectionPolicy;
import org.apache.gobblin.data.management.policy.SelectNothingPolicy;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.retention.action.RetentionAction;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.trash.ProxiedTrash;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.ConfigUtils;
/**
* A {@link CleanableDataset} that may have multiple {@link VersionFinder}, {@link VersionSelectionPolicy}
* and {@link RetentionAction}s. Retention needs to performed for different kinds of {@link DatasetVersion}s. Each
* kind of {@link DatasetVersion} can have its own {@link VersionSelectionPolicy} and/or {@link RetentionAction}
* associated with it.
* <ul>
* <li>{@link MultiVersionCleanableDatasetBase#getVersionFindersAndPolicies()} gets a list {@link VersionFinderAndPolicy}s
* <li>Each {@link VersionFinderAndPolicy} contains a {@link VersionFinder} and a {@link VersionSelectionPolicy}. It can
* optionally have a {@link RetentionAction}
* <li>The {@link MultiVersionCleanableDatasetBase#clean()} method finds all the {@link FileSystemDatasetVersion}s using
* {@link VersionFinderAndPolicy#versionFinder}
* <li> It gets the deletable {@link FileSystemDatasetVersion}s by applying {@link VersionFinderAndPolicy#versionSelectionPolicy}.
* These deletable version are deleted and then deletes empty parent directories.
* <li>If additional retention actions are available at {@link VersionFinderAndPolicy#getRetentionActions()}, all versions
* found by the {@link VersionFinderAndPolicy#versionFinder} are passed to {@link RetentionAction#execute(List)} for
* each {@link RetentionAction}
* </ul>
*
* <p>
* Concrete subclasses should implement {@link #getVersionFindersAndPolicies()}
* </p>
*
* <p>
* Datasets are directories in the filesystem containing data files organized in version-like directory structures.
* Example datasets:
* </p>
*
* <p>
* For snapshot based datasets, with the directory structure:
* <pre>
* /path/to/table/
* snapshot1/
* dataFiles...
* snapshot2/
* dataFiles...
* </pre>
* each of snapshot1 and snapshot2 are dataset versions.
* </p>
*
* <p>
* For tracking datasets, with the directory structure:
* <pre>
* /path/to/tracking/data/
* 2015/
* 06/
* 01/
* dataFiles...
* 02/
* dataFiles...
* </pre>
* each of 2015/06/01 and 2015/06/02 are dataset versions.
* </p>
*
* @param <T> type of {@link FileSystemDatasetVersion} supported by this {@link CleanableDataset}.
*/
public abstract class MultiVersionCleanableDatasetBase<T extends FileSystemDatasetVersion>
    implements CleanableDataset, FileSystemDataset {

  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  public static final String CONFIGURATION_KEY_PREFIX = FsCleanableHelper.CONFIGURATION_KEY_PREFIX;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  public static final String SIMULATE_KEY = FsCleanableHelper.SIMULATE_KEY;
  public static final String SIMULATE_DEFAULT = FsCleanableHelper.SIMULATE_DEFAULT;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  public static final String SKIP_TRASH_KEY = FsCleanableHelper.SKIP_TRASH_KEY;
  public static final String SKIP_TRASH_DEFAULT = FsCleanableHelper.SKIP_TRASH_DEFAULT;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  public static final String DELETE_EMPTY_DIRECTORIES_KEY = FsCleanableHelper.DELETE_EMPTY_DIRECTORIES_KEY;
  public static final String DELETE_EMPTY_DIRECTORIES_DEFAULT = FsCleanableHelper.DELETE_EMPTY_DIRECTORIES_DEFAULT;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  public static final String DELETE_AS_OWNER_KEY = FsCleanableHelper.DELETE_AS_OWNER_KEY;
  public static final String DELETE_AS_OWNER_DEFAULT = FsCleanableHelper.DELETE_AS_OWNER_DEFAULT;

  // Dataset-level (config store) switch; when true, clean() becomes a no-op for this dataset.
  public static final String IS_DATASET_BLACKLISTED_KEY = CONFIGURATION_KEY_PREFIX + "dataset.is.blacklisted";
  public static final String IS_DATASET_BLACKLISTED_DEFAULT = Boolean.toString(false);

  protected final FileSystem fs;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  protected final ProxiedTrash trash;
  // When true, skip all cleanup for this dataset (see IS_DATASET_BLACKLISTED_KEY).
  @Getter
  @VisibleForTesting
  protected final boolean isDatasetBlacklisted;
  // Performs the actual deletes (trash moves and empty-parent pruning) on behalf of this dataset.
  private final FsCleanableHelper fsCleanableHelper;
  protected final Logger log;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  protected final boolean simulate;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  protected final boolean skipTrash;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  protected final boolean deleteEmptyDirectories;
  /**
   * @deprecated in favor of {@link FsCleanableHelper}
   */
  @Deprecated
  protected final boolean deleteAsOwner;

  /**
   * Get the list of {@link VersionFinderAndPolicy}s to apply during {@link #clean()}. Each entry
   * pairs a {@link VersionFinder} with a {@link VersionSelectionPolicy} (and optional
   * {@link RetentionAction}s) that together decide which versions to delete.
   */
  public abstract List<VersionFinderAndPolicy<T>> getVersionFindersAndPolicies();

  /**
   * Constructor that reads the simulate / skip-trash / delete flags from job {@link Properties}
   * and the blacklist flag from the dataset {@link Config}.
   */
  public MultiVersionCleanableDatasetBase(final FileSystem fs, final Properties props, Config config, Logger log)
      throws IOException {
    this(fs, props, Boolean.valueOf(props.getProperty(SIMULATE_KEY, SIMULATE_DEFAULT)),
        Boolean.valueOf(props.getProperty(SKIP_TRASH_KEY, SKIP_TRASH_DEFAULT)),
        Boolean.valueOf(props.getProperty(DELETE_EMPTY_DIRECTORIES_KEY, DELETE_EMPTY_DIRECTORIES_DEFAULT)),
        Boolean.valueOf(props.getProperty(DELETE_AS_OWNER_KEY, DELETE_AS_OWNER_DEFAULT)),
        ConfigUtils.getBoolean(config, IS_DATASET_BLACKLISTED_KEY, Boolean.valueOf(IS_DATASET_BLACKLISTED_DEFAULT)), log);
  }

  public MultiVersionCleanableDatasetBase(final FileSystem fs, final Properties props, Logger log) throws IOException {
    // This constructor is used by retention jobs configured through job configs and do not use dataset configs from config store.
    // IS_DATASET_BLACKLISTED_KEY is only available with dataset config. Hence set IS_DATASET_BLACKLISTED_KEY to default
    // ...false for jobs running with job configs
    this(fs, props, ConfigFactory.parseMap(ImmutableMap.<String, String> of(IS_DATASET_BLACKLISTED_KEY,
        IS_DATASET_BLACKLISTED_DEFAULT)), log);
  }

  /**
   * Constructor for {@link MultiVersionCleanableDatasetBase}.
   * @param fs {@link org.apache.hadoop.fs.FileSystem} where files are located.
   * @param properties {@link java.util.Properties} for object.
   * @param simulate whether to simulate deletes.
   * @param skipTrash if true, delete files and directories immediately.
   * @param deleteEmptyDirectories if true, newly empty parent directories will be deleted.
   * @param deleteAsOwner if true, all deletions will be executed as the owner of the file / directory.
   * @param log logger to use.
   * @param isDatasetBlacklisted if true, clean will be skipped for this dataset
   *
   * @throws IOException
   */
  public MultiVersionCleanableDatasetBase(FileSystem fs, Properties properties, boolean simulate, boolean skipTrash,
      boolean deleteEmptyDirectories, boolean deleteAsOwner, boolean isDatasetBlacklisted, Logger log)
      throws IOException {
    this.log = log;
    this.fsCleanableHelper = new FsCleanableHelper(fs, properties, simulate, skipTrash, deleteEmptyDirectories, deleteAsOwner, log);
    this.fs = fs;
    this.simulate = simulate;
    this.skipTrash = skipTrash;
    this.deleteEmptyDirectories = deleteEmptyDirectories;
    // Deprecated field kept in sync with the helper so legacy subclasses keep working.
    this.trash = this.fsCleanableHelper.getTrash();
    this.deleteAsOwner = deleteAsOwner;
    this.isDatasetBlacklisted = isDatasetBlacklisted;
  }

  /** Convenience constructor that defaults the blacklist flag to {@value #IS_DATASET_BLACKLISTED_DEFAULT}. */
  public MultiVersionCleanableDatasetBase(FileSystem fs, Properties properties, boolean simulate, boolean skipTrash,
      boolean deleteEmptyDirectories, boolean deleteAsOwner, Logger log) throws IOException {
    this(fs, properties, simulate, skipTrash, deleteEmptyDirectories, deleteAsOwner,
        Boolean.parseBoolean(IS_DATASET_BLACKLISTED_DEFAULT), log);
  }

  /**
   * Method to perform the Retention operations for this dataset.
   *
   *<ul>
   * <li>{@link MultiVersionCleanableDatasetBase#getVersionFindersAndPolicies()} gets a list {@link VersionFinderAndPolicy}s
   * <li>Each {@link VersionFinderAndPolicy} contains a {@link VersionFinder} and a {@link VersionSelectionPolicy}. It can
   * optionally have a {@link RetentionAction}
   * <li>The {@link MultiVersionCleanableDatasetBase#clean()} method finds all the {@link FileSystemDatasetVersion}s using
   * {@link VersionFinderAndPolicy#versionFinder}
   * <li> It gets the deletable {@link FileSystemDatasetVersion}s by applying {@link VersionFinderAndPolicy#versionSelectionPolicy}.
   * These deletable version are deleted and then deletes empty parent directories.
   * <li>If additional retention actions are available at {@link VersionFinderAndPolicy#getRetentionActions()}, all versions
   * found by the {@link VersionFinderAndPolicy#versionFinder} are passed to {@link RetentionAction#execute(List)} for
   * each {@link RetentionAction}
   * </ul>
   *
   */
  @Override
  public void clean() throws IOException {
    if (this.isDatasetBlacklisted) {
      this.log.info("Dataset blacklisted. Cleanup skipped for " + datasetRoot());
      return;
    }
    boolean atLeastOneFailureSeen = false;
    for (VersionFinderAndPolicy<T> versionFinderAndPolicy : getVersionFindersAndPolicies()) {
      VersionSelectionPolicy<T> selectionPolicy = versionFinderAndPolicy.getVersionSelectionPolicy();
      VersionFinder<? extends T> versionFinder = versionFinderAndPolicy.getVersionFinder();
      // Sanity check: the policy must accept the version type the finder produces.
      if (!selectionPolicy.versionClass().isAssignableFrom(versionFinder.versionClass())) {
        throw new IOException("Incompatible dataset version classes.");
      }
      this.log.info(String.format("Cleaning dataset %s. Using version finder %s and policy %s", this,
          versionFinder.getClass().getName(), selectionPolicy));
      List<T> versions = Lists.newArrayList(versionFinder.findDatasetVersions(this));
      if (versions.isEmpty()) {
        this.log.warn("No dataset version can be found. Ignoring.");
        continue;
      }
      // Present versions to the selection policy in reverse (descending) natural order.
      Collections.sort(versions, Collections.reverseOrder());
      Collection<T> deletableVersions = selectionPolicy.listSelectedVersions(versions);
      cleanImpl(deletableVersions);
      // Retention actions receive ALL found versions, not just the deletable subset.
      List<DatasetVersion> allVersions = Lists.newArrayList();
      for (T ver : versions) {
        allVersions.add(ver);
      }
      for (RetentionAction retentionAction : versionFinderAndPolicy.getRetentionActions()) {
        try {
          // Failures are logged and deferred so every retention action still gets a chance to run.
          retentionAction.execute(allVersions);
        } catch (Throwable t) {
          atLeastOneFailureSeen = true;
          log.error(String.format("RetentionAction %s failed for dataset %s", retentionAction.getClass().getName(),
              this.datasetRoot()), t);
        }
      }
    }
    if (atLeastOneFailureSeen) {
      throw new RuntimeException(String.format(
          "At least one failure happened while processing %s. Look for previous logs for failures", datasetRoot()));
    }
  }

  /** Deletes the selected versions (and newly-empty parent directories) via {@link FsCleanableHelper}. */
  protected void cleanImpl(Collection<T> deletableVersions) throws IOException {
    this.fsCleanableHelper.clean(deletableVersions, this);
  }

  @Override
  public String toString() {
    return datasetRoot().toString();
  }

  // The dataset URN is simply the dataset root path.
  @Override
  public String datasetURN() {
    return this.datasetRoot().toString();
  }

  /**
   * A composition of version finder
   * @param <T> the type of {@link FileSystemDatasetVersion} this version finder knows to find
   */
  @Getter
  @Builder
  @AllArgsConstructor
  public static class VersionFinderAndPolicy<T extends FileSystemDatasetVersion> {
    private final VersionSelectionPolicy<T> versionSelectionPolicy;
    private final VersionFinder<? extends T> versionFinder;
    @Singular
    private final List<RetentionAction> retentionActions;
    private final Config config;

    /**
     * Constructor for backward compatibility
     * @deprecated use {@link VersionFinderAndPolicyBuilder}
     */
    @Deprecated
    public VersionFinderAndPolicy(VersionSelectionPolicy<T> versionSelectionPolicy, VersionFinder<? extends T> versionFinder, Config config) {
      this.versionSelectionPolicy = versionSelectionPolicy;
      this.versionFinder = versionFinder;
      this.retentionActions = Lists.newArrayList();
      this.config = config;
    }

    /** Wraps a legacy {@link RetentionPolicy} in an {@link EmbeddedRetentionSelectionPolicy}. */
    public VersionFinderAndPolicy(RetentionPolicy<T> retentionPolicy, VersionFinder<? extends T> versionFinder, Config config) {
      this(new EmbeddedRetentionSelectionPolicy<>(retentionPolicy), versionFinder, config);
    }

    // Customizes the lombok-generated builder: supplies safe defaults for unset fields.
    public static class VersionFinderAndPolicyBuilder<T extends FileSystemDatasetVersion> {
      @SuppressWarnings("unchecked")
      public VersionFinderAndPolicy<T> build() {
        VersionSelectionPolicy<T> localVersionSelectionPolicy;
        List<RetentionAction> localRetentionActions;
        if (this.versionSelectionPolicy == null) {
          // Default: select nothing, i.e. a no-op retention.
          localVersionSelectionPolicy = (VersionSelectionPolicy<T>) new SelectNothingPolicy(new Properties());
        } else {
          localVersionSelectionPolicy = this.versionSelectionPolicy;
        }
        if (this.retentionActions == null) {
          localRetentionActions = Lists.newArrayList();
        } else {
          // Defensive copy of the builder's accumulated actions.
          localRetentionActions = Lists.newArrayList(this.retentionActions);
        }
        return new VersionFinderAndPolicy<T>(localVersionSelectionPolicy, this.versionFinder,
            localRetentionActions, this.config);
      }
    }
  }
}
| 2,389 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/TrackingDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.retention.policy.TimeBasedRetentionPolicy;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.DateTimeDatasetVersionFinder;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
/**
* {@link CleanableDatasetBase} for tracking data.
*
* Uses a {@link org.apache.gobblin.data.management.retention.version.finder.DateTimeDatasetVersionFinder} and a
* {@link org.apache.gobblin.data.management.retention.policy.TimeBasedRetentionPolicy}.
*/
public class TrackingDataset extends CleanableDatasetBase<TimestampedDatasetVersion> {

  private final Path datasetRoot;
  private final VersionFinder<? extends TimestampedDatasetVersion> versionFinder;
  private final RetentionPolicy<TimestampedDatasetVersion> retentionPolicy;

  /** Creates a tracking dataset with a logger scoped to this class. */
  public TrackingDataset(FileSystem fs, Properties props, Path datasetRoot)
      throws IOException {
    this(fs, props, datasetRoot, LoggerFactory.getLogger(TrackingDataset.class));
  }

  /**
   * Creates a tracking dataset rooted at {@code datasetRoot}, cleaned by a date-time version
   * finder paired with a time-based retention policy.
   */
  public TrackingDataset(FileSystem fs, Properties props, Path datasetRoot, Logger log)
      throws IOException {
    super(fs, props, log);
    this.datasetRoot = datasetRoot;
    // Deliberately uses the deprecated finder: it understands the legacy configuration keys.
    this.versionFinder = new DateTimeDatasetVersionFinder(fs, props);
    this.retentionPolicy = new TimeBasedRetentionPolicy(props);
  }

  /** Root path of this dataset on the file system. */
  @Override
  public Path datasetRoot() {
    return this.datasetRoot;
  }

  /** Finder used to enumerate timestamped versions under the dataset root. */
  @Override
  public VersionFinder<? extends TimestampedDatasetVersion> getVersionFinder() {
    return this.versionFinder;
  }

  /** Policy deciding which timestamped versions are old enough to delete. */
  @Override
  public RetentionPolicy<TimestampedDatasetVersion> getRetentionPolicy() {
    return this.retentionPolicy;
  }
}
| 2,390 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/FsCleanableHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import lombok.Getter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.trash.ProxiedTrash;
import org.apache.gobblin.data.management.trash.TrashFactory;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.util.PathUtils;
/**
* A helper class to delete {@link Path}s of a FileSystemDatasetVersion.
* <p>
* Supports the following job level settings:
* <ul>
* <li> Simulate Mode - Only log paths to be deleted without deleting data by setting {@value #SIMULATE_KEY} to true.
* <li> Skip Trash - Delete permanent by setting {@value #SKIP_TRASH_KEY} to true.
* <li> Auto delete empty parent directories - By setting {@value #DELETE_EMPTY_DIRECTORIES_KEY} to true.
* <li> Proxy as owner and delete - By setting {@value #DELETE_AS_OWNER_KEY} to true.
* </ul>
* </p>
*/
public class FsCleanableHelper {
  public static final String CONFIGURATION_KEY_PREFIX = "gobblin.retention.";
  public static final String RETENTION_DATASET_ROOT = CONFIGURATION_KEY_PREFIX + "dataset.root";
  public static final String SIMULATE_KEY = CONFIGURATION_KEY_PREFIX + "simulate";
  public static final String SIMULATE_DEFAULT = Boolean.toString(false);
  public static final String SKIP_TRASH_KEY = CONFIGURATION_KEY_PREFIX + "skip.trash";
  public static final String SKIP_TRASH_DEFAULT = Boolean.toString(false);
  public static final String DELETE_EMPTY_DIRECTORIES_KEY = CONFIGURATION_KEY_PREFIX + "delete.empty.directories";
  public static final String DELETE_EMPTY_DIRECTORIES_DEFAULT = Boolean.toString(true);
  public static final String DELETE_AS_OWNER_KEY = CONFIGURATION_KEY_PREFIX + "delete.as.owner";
  public static final String DELETE_AS_OWNER_DEFAULT = Boolean.toString(true);

  protected final FileSystem fs;
  // Trash used for the (possibly proxied) deletes; exposed via lombok getter.
  @Getter
  protected final ProxiedTrash trash;
  protected final boolean simulate;
  protected final boolean skipTrash;
  protected final boolean deleteEmptyDirectories;
  protected final boolean deleteAsOwner;
  protected final Logger log;

  /**
   * Builds a helper with explicit flags.
   *
   * @param simulate only log what would be deleted, without deleting.
   * @param skipTrash delete permanently instead of moving to trash.
   * @param deleteEmptyDirectories prune parent directories left empty by deletes.
   * @param deleteAsOwner perform deletes proxied as the owner of each path.
   */
  public FsCleanableHelper(FileSystem fs, Properties properties, boolean simulate, boolean skipTrash, boolean deleteEmptyDirectories, boolean deleteAsOwner,
      Logger log) throws IOException {
    this.log = log;
    this.fs = fs;
    this.simulate = simulate;
    this.skipTrash = skipTrash;
    this.deleteEmptyDirectories = deleteEmptyDirectories;
    this.deleteAsOwner = deleteAsOwner;
    // Propagate the simulate / skip-trash switches to the trash implementation through its own keys.
    Properties trashProps = new Properties();
    trashProps.putAll(properties);
    if (simulate) {
      trashProps.setProperty(TrashFactory.SIMULATE, Boolean.toString(true));
    }
    if (skipTrash) {
      trashProps.setProperty(TrashFactory.SKIP_TRASH, Boolean.toString(true));
    }
    this.trash = TrashFactory.createProxiedTrash(this.fs, trashProps);
  }

  /**
   * Builds a helper reading all flags from job {@link Properties}.
   * NOTE(review): the {@link Config} argument is not consulted here — kept for signature
   * compatibility; confirm callers before changing.
   */
  public FsCleanableHelper(final FileSystem fs, final Properties props, Config config, Logger log) throws IOException {
    this(fs, props, Boolean.valueOf(props.getProperty(SIMULATE_KEY, SIMULATE_DEFAULT)),
        Boolean.valueOf(props.getProperty(SKIP_TRASH_KEY, SKIP_TRASH_DEFAULT)),
        Boolean.valueOf(props.getProperty(DELETE_EMPTY_DIRECTORIES_KEY, DELETE_EMPTY_DIRECTORIES_DEFAULT)),
        Boolean.valueOf(props.getProperty(DELETE_AS_OWNER_KEY, DELETE_AS_OWNER_DEFAULT)), log);
  }

  /**
   * Deletes every path of a single {@link FileSystemDatasetVersion}, collecting each deleted path's
   * parent into {@code possiblyEmptyDirectories}. The caller is responsible for invoking
   * {@link #cleanEmptyDirectories(Set, FileSystemDataset)} afterwards to prune any now-empty parents.
   */
  public void clean(final FileSystemDatasetVersion versionToDelete, final Set<Path> possiblyEmptyDirectories) throws IOException {
    log.info("Deleting dataset version " + versionToDelete);
    Set<Path> paths = versionToDelete.getPaths();
    log.info("Deleting paths: " + Arrays.toString(paths.toArray()));
    boolean allPathsDeleted = true;
    for (Path path : paths) {
      if (!this.fs.exists(path)) {
        log.info(String.format("Path %s in dataset version %s does not exist", path, versionToDelete));
        continue;
      }
      final boolean moved;
      if (this.deleteAsOwner) {
        moved = this.trash.moveToTrashAsOwner(path);
      } else {
        moved = this.trash.moveToTrash(path);
      }
      if (moved) {
        // The parent may now be empty; the caller decides whether to prune it.
        possiblyEmptyDirectories.add(path.getParent());
      } else {
        log.error("Failed to delete path " + path + " in dataset version " + versionToDelete);
        allPathsDeleted = false;
      }
    }
    if (!allPathsDeleted) {
      log.error("Failed to delete some paths in dataset version " + versionToDelete);
    }
  }

  /**
   * Deletes every version in {@code deletableVersions} and then prunes any parent directories the
   * deletes left empty, up to {@link FileSystemDataset#datasetRoot()}.
   *
   * @param fsDataset to which the version belongs.
   */
  public void clean(final Collection<? extends FileSystemDatasetVersion> deletableVersions, final FileSystemDataset fsDataset) throws IOException {
    if (deletableVersions.isEmpty()) {
      log.warn("No deletable dataset version can be found. Ignoring.");
      return;
    }
    Set<Path> possiblyEmptyDirectories = new HashSet<>();
    for (FileSystemDatasetVersion version : deletableVersions) {
      clean(version, possiblyEmptyDirectories);
    }
    cleanEmptyDirectories(possiblyEmptyDirectories, fsDataset);
  }

  /**
   * Prunes empty directories among {@code possiblyEmptyDirectories}, walking up toward
   * {@link FileSystemDataset#datasetRoot()}. No-op in simulate mode or when empty-directory
   * deletion is disabled.
   */
  public void cleanEmptyDirectories(final Set<Path> possiblyEmptyDirectories, final FileSystemDataset fsDataset) throws IOException {
    if (!this.deleteEmptyDirectories || this.simulate) {
      return;
    }
    for (Path candidate : possiblyEmptyDirectories) {
      PathUtils.deleteEmptyParentDirectories(this.fs, fsDataset.datasetRoot(), candidate);
    }
  }
}
| 2,391 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/TimeBasedDatasetStoreDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.lang.reflect.InvocationTargetException;
import java.util.List;
import java.util.Properties;
import com.google.common.collect.ImmutableList;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.policy.SelectBeforeTimeBasedPolicy;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.TimestampedDatasetStateStoreVersion;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.version.finder.TimestampedDatasetStateStoreVersionFinder;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import lombok.Data;
/**
* A {@link CleanableDatasetStoreDataset} that deletes entries before a certain time
*/
@Data
public class TimeBasedDatasetStoreDataset extends CleanableDatasetStoreDataset<TimestampedDatasetVersion> {
  private static final String SELECTION_POLICY_CLASS_KEY = "selection.policy.class";
  private static final String DEFAULT_SELECTION_POLICY_CLASS = SelectBeforeTimeBasedPolicy.class.getName();

  private final VersionFinder<TimestampedDatasetStateStoreVersion> versionFinder;
  private final VersionSelectionPolicy<TimestampedDatasetVersion> versionSelectionPolicy;

  /**
   * Builds the dataset and instantiates its selection policy from {@code props}
   * (default: {@link SelectBeforeTimeBasedPolicy}).
   */
  public TimeBasedDatasetStoreDataset(Key key, List<DatasetStateStoreEntryManager> entries, Properties props) {
    super(key, entries);
    this.versionFinder = new TimestampedDatasetStateStoreVersionFinder();
    Config jobConfig = ConfigUtils.propertiesToConfig(props);
    // The selection policy expects keys without the retention namespace, so strip the namespace
    // off, falling back to the full job config for everything else.
    Config policyConfig = ConfigUtils
        .getConfigOrEmpty(jobConfig, ConfigurableCleanableDataset.RETENTION_CONFIGURATION_KEY)
        .withFallback(jobConfig);
    String policyClassName =
        ConfigUtils.getString(policyConfig, SELECTION_POLICY_CLASS_KEY, DEFAULT_SELECTION_POLICY_CLASS);
    this.versionSelectionPolicy = createSelectionPolicy(policyClassName, policyConfig, props);
  }

  /** Finder producing timestamped state-store versions. */
  @Override
  public VersionFinder<TimestampedDatasetStateStoreVersion> getVersionFinder() {
    return this.versionFinder;
  }

  /** Policy selecting which versions are old enough to delete. */
  @Override
  public VersionSelectionPolicy<TimestampedDatasetVersion> getVersionSelectionPolicy() {
    return this.versionSelectionPolicy;
  }

  /**
   * Reflectively instantiates the selection policy, trying (Config), (Config, Properties) and
   * (Properties) constructors in that order.
   *
   * @throws IllegalArgumentException if the class cannot be loaded or constructed.
   */
  @SuppressWarnings("unchecked")
  private VersionSelectionPolicy<TimestampedDatasetVersion> createSelectionPolicy(String className,
      Config config, Properties jobProps) {
    try {
      Class<?> policyClass = Class.forName(className);
      return (VersionSelectionPolicy<TimestampedDatasetVersion>) GobblinConstructorUtils.invokeFirstConstructor(
          policyClass, ImmutableList.<Object> of(config), ImmutableList.<Object> of(config, jobProps),
          ImmutableList.<Object> of(jobProps));
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
        | ClassNotFoundException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
| 2,392 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/CleanableIcebergDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.iceberg.GobblinMCEProducer;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.catalog.TableIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A subclass of {@link ConfigurableCleanableDataset} that overwrite the {@link ConfigurableCleanableDataset#cleanImpl(Collection)}
* to firstly send gmce to delete dataset logically from iceberg and then process GMCEs within metadata-ingestion pipeline
* by calling {@link org.apache.iceberg.Table#expireSnapshots()} to materialize data/metadata retention
*/
public class CleanableIcebergDataset<T extends FileSystemDatasetVersion> extends ConfigurableCleanableDataset<T> {
  // Job property controlling the post-clean pause (millis) between retention events.
  private final static String RETENTION_INTERVAL_TIME = "retention.interval.time";
  private final static String DEFAULT_RETENTION_INTERVAL_TIME = "10000";

  protected Config config;
  protected Properties jobProps;
  // NOTE(review): initialized in the constructor but never read within this class — looks
  // vestigial; confirm no external/package use before removing.
  Set<TableIdentifier> expiredTable;

  public CleanableIcebergDataset(FileSystem fs, Properties jobProps, Path datasetRoot, Config config, Logger log)
      throws IOException {
    super(fs, jobProps, datasetRoot, config, log);
    this.config = config;
    this.jobProps = jobProps;
    this.expiredTable = new HashSet<>();
  }

  public CleanableIcebergDataset(FileSystem fs, Properties props, Path datasetRoot) throws IOException {
    this(fs, props, datasetRoot, LoggerFactory.getLogger(CleanableIcebergDataset.class));
  }

  public CleanableIcebergDataset(FileSystem fs, Properties props, Path datasetRoot, Logger log) throws IOException {
    this(fs, props, datasetRoot, ConfigFactory.parseProperties(props), log);
  }

  /**
   * Applies every configured {@link VersionFinderAndPolicy}: finds versions, selects the deletable
   * ones, and emits GMCEs for them via {@link #cleanImpl(Collection, Config)}. Afterwards sleeps
   * {@value #RETENTION_INTERVAL_TIME} millis so the metadata-ingestion pipeline is not hit with a
   * burst of retention events at once.
   *
   * @throws IOException on incompatible version classes, GMCE emission failure, or interruption.
   */
  @Override
  public void clean() throws IOException {
    if (this.isDatasetBlacklisted) {
      this.log.info("Dataset blacklisted. Cleanup skipped for " + datasetRoot());
      return;
    }
    // Fix: removed the "atLeastOneFailureSeen" flag copied from the parent class — it was never
    // set to true here, so its failure-throw at the end was unreachable dead code. Failures from
    // cleanImpl already propagate as IOException.
    for (VersionFinderAndPolicy<T> versionFinderAndPolicy : getVersionFindersAndPolicies()) {
      Config retentionConfig = versionFinderAndPolicy.getConfig();
      Preconditions.checkArgument(retentionConfig != null,
          "Must specify retention config for iceberg dataset retention");
      VersionSelectionPolicy<T> selectionPolicy = versionFinderAndPolicy.getVersionSelectionPolicy();
      VersionFinder<? extends T> versionFinder = versionFinderAndPolicy.getVersionFinder();
      // Sanity check: the policy must accept the version type the finder produces.
      if (!selectionPolicy.versionClass().isAssignableFrom(versionFinder.versionClass())) {
        throw new IOException("Incompatible dataset version classes.");
      }
      this.log.info(String.format("Cleaning dataset %s. Using version finder %s and policy %s", this,
          versionFinder.getClass().getName(), selectionPolicy));
      List<T> versions = Lists.newArrayList(versionFinder.findDatasetVersions(this));
      if (versions.isEmpty()) {
        this.log.warn("No dataset version can be found. Ignoring.");
        continue;
      }
      // Present versions to the selection policy in reverse (descending) natural order.
      Collections.sort(versions, Collections.reverseOrder());
      Collection<T> deletableVersions = selectionPolicy.listSelectedVersions(versions);
      cleanImpl(deletableVersions, retentionConfig);
    }
    try {
      // Sleep for a while to make sure metadata pipeline won't get bunch of retention events at the same time, which will
      // affect the SLA of retention pipeline
      Thread.sleep(Long.parseLong(jobProps.getProperty(RETENTION_INTERVAL_TIME, DEFAULT_RETENTION_INTERVAL_TIME)));
    } catch (InterruptedException e) {
      // Fix: restore the interrupt status before translating to IOException so callers can still
      // observe that the thread was interrupted.
      Thread.currentThread().interrupt();
      log.error("interrupted while sleep");
      throw new IOException(e);
    }
  }

  /**
   * Only in charge of filing {@link org.apache.gobblin.metadata.GobblinMetadataChangeEvent}
   * The processing of these events can be seen in {@link org.apache.gobblin.iceberg.writer.IcebergMetadataWriter}.
   * In simulate mode the GMCE is only logged, not sent.
   */
  protected void cleanImpl(Collection<T> deletableVersions, Config retentionConfig) throws IOException {
    List<String> deletablePrefix = new ArrayList<>();
    for (T version : deletableVersions) {
      version.getPaths().forEach(p -> deletablePrefix.add(fs.makeQualified(p).toString()));
    }
    if (deletablePrefix.isEmpty()) {
      return;
    }
    // The hive registration policy and database name are mandatory for routing the GMCE.
    Preconditions.checkArgument(retentionConfig.hasPath(ConfigurationKeys.HIVE_REGISTRATION_POLICY));
    Preconditions.checkArgument(retentionConfig.hasPath(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME));
    Properties prop = new Properties();
    prop.putAll(jobProps);
    State producerState = new State(prop);
    producerState.setProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY,
        retentionConfig.getString(ConfigurationKeys.HIVE_REGISTRATION_POLICY));
    producerState.setProp(GobblinMCEProducer.OLD_FILES_HIVE_REGISTRATION_KEY,
        retentionConfig.getString(ConfigurationKeys.HIVE_REGISTRATION_POLICY));
    producerState.setProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME,
        retentionConfig.getString(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME));
    if (retentionConfig.hasPath(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES)) {
      producerState.setProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES,
          retentionConfig.getString(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES));
    }
    producerState.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, this.datasetURN());
    if (!this.simulate) {
      try (GobblinMCEProducer producer = GobblinMCEProducer.getGobblinMCEProducer(producerState)) {
        producer.sendGMCE(null, null, deletablePrefix, null, OperationType.drop_files, SchemaSource.NONE);
        log.info("Sent gmce to delete path {} from icebergTable",
            deletablePrefix.stream().map(Object::toString).collect(Collectors.joining(",")));
      }
    } else {
      log.info("In simulate mode, going to send gmce to delete path {} from icebergTable",
          deletablePrefix.stream().map(Object::toString).collect(Collectors.joining(",")));
    }
  }
}
| 2,393 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/CleanableDatasetStoreDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.version.DatasetStateStoreVersion;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.metastore.DatasetStoreDataset;
import org.apache.gobblin.metastore.metadata.DatasetStateStoreEntryManager;
import com.google.common.collect.Lists;
/**
* A cleanable {@link DatasetStoreDataset}
*/
public abstract class CleanableDatasetStoreDataset<T extends DatasetVersion> extends DatasetStoreDataset implements CleanableDataset {

  public CleanableDatasetStoreDataset(DatasetStoreDataset.Key key, List<DatasetStateStoreEntryManager> entries) {
    super(key, entries);
  }

  /** Finder that enumerates the versions of this state-store dataset. */
  public abstract VersionFinder<? extends T> getVersionFinder();

  /** Policy deciding which of the found versions should be deleted. */
  public abstract VersionSelectionPolicy<T> getVersionSelectionPolicy();

  /**
   * Finds all versions, orders them in reverse natural order, and deletes the state-store entry
   * backing each version the selection policy picks.
   */
  @Override
  public void clean() throws IOException {
    List<T> versions = Lists.newArrayList(getVersionFinder().findDatasetVersions(this));
    versions.sort(Collections.reverseOrder());
    for (T selected : getVersionSelectionPolicy().listSelectedVersions(versions)) {
      // Every selected version is expected to be backed by a state-store entry.
      ((DatasetStateStoreVersion) selected).getEntry().delete();
    }
  }
}
| 2,394 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/SnapshotDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.data.management.retention.policy.NewestKRetentionPolicy;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.retention.version.StringDatasetVersion;
import org.apache.gobblin.data.management.retention.version.finder.VersionFinder;
import org.apache.gobblin.data.management.retention.version.finder.WatermarkDatasetVersionFinder;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
/**
* {@link CleanableDatasetBase} for snapshot datasets.
*
* Uses a {@link org.apache.gobblin.data.management.retention.version.finder.WatermarkDatasetVersionFinder} and a
* {@link org.apache.gobblin.data.management.retention.policy.NewestKRetentionPolicy}.
*/
public class SnapshotDataset extends CleanableDatasetBase<FileSystemDatasetVersion> {

  private final Path datasetRoot;
  private final VersionFinder<StringDatasetVersion> versionFinder;
  private final RetentionPolicy<FileSystemDatasetVersion> retentionPolicy;

  public SnapshotDataset(FileSystem fs, Properties props, Path datasetRoot) throws IOException {
    this(fs, props, datasetRoot, LoggerFactory.getLogger(SnapshotDataset.class));
  }

  /**
   * Creates a snapshot dataset rooted at {@code datasetRoot} whose versions are discovered via a
   * {@link WatermarkDatasetVersionFinder} and pruned with a {@link NewestKRetentionPolicy}.
   */
  public SnapshotDataset(FileSystem fs, Properties props, Path datasetRoot, Logger log)
      throws IOException {
    super(fs, props, log);
    this.datasetRoot = datasetRoot;
    this.versionFinder = new WatermarkDatasetVersionFinder(fs, props);
    this.retentionPolicy = new NewestKRetentionPolicy<>(props);
  }

  @Override
  public Path datasetRoot() {
    return this.datasetRoot;
  }

  @Override
  public VersionFinder<? extends FileSystemDatasetVersion> getVersionFinder() {
    return this.versionFinder;
  }

  @Override
  public RetentionPolicy<FileSystemDatasetVersion> getRetentionPolicy() {
    return this.retentionPolicy;
  }
}
| 2,395 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/CleanableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import org.apache.gobblin.dataset.Dataset;
/**
* An abstraction for a set of files where a simple {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy}
* can be applied.
*/
public interface CleanableDataset extends Dataset {

  /**
   * Applies this dataset's retention rules — typically by evaluating a
   * {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy} and deleting the
   * files and directories it marks for removal.
   *
   * @throws IOException on I/O errors while discovering or deleting versions
   */
  void clean() throws IOException;
}
| 2,396 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/ConfigurableCleanableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigRenderOptions;
import org.apache.gobblin.data.management.policy.SelectNothingPolicy;
import org.apache.gobblin.data.management.policy.VersionSelectionPolicy;
import org.apache.gobblin.data.management.retention.action.MultiAccessControlAction.MultiAccessControlActionFactory;
import org.apache.gobblin.data.management.retention.action.RetentionAction;
import org.apache.gobblin.data.management.retention.action.RetentionAction.RetentionActionFactory;
import org.apache.gobblin.data.management.retention.dataset.MultiVersionCleanableDatasetBase.VersionFinderAndPolicy.VersionFinderAndPolicyBuilder;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* {@link CleanableDatasetBase} that instantiates {@link VersionFinder} and {@link RetentionPolicy} from classes read
* from an input {@link java.util.Properties}.
*
* <p>
* The class of {@link VersionFinder} should be under key {@link #VERSION_FINDER_CLASS_KEY}, while the class of
* {@link RetentionPolicy} should be under key {@link #RETENTION_POLICY_CLASS_KEY}.
* </p>
*/
public class ConfigurableCleanableDataset<T extends FileSystemDatasetVersion>
extends MultiVersionCleanableDatasetBase<T> {
public static final String RETENTION_CONFIGURATION_KEY = "gobblin.retention";
public static final String CONFIGURATION_KEY_PREFIX = RETENTION_CONFIGURATION_KEY + ".";
public static final String VERSION_FINDER_CLASS_KEY = CONFIGURATION_KEY_PREFIX + "version.finder.class";
public static final String RETENTION_POLICY_CLASS_KEY = CONFIGURATION_KEY_PREFIX + "retention.policy.class";
public static final String SELECTION_POLICY_CLASS_KEY = CONFIGURATION_KEY_PREFIX + "selection.policy.class";
/**
* This key is used if the dataset contains multiple partition each with its own version finder and selection policy.
*
* gobblin.retention.dataset.partitions is a list of version finder and policies.
*
* E.g.
* <pre>
*
* gobblin.retention {
* partitions : [
* {
* selection {
* policy.class = data.management.policy.SelectBeforeTimeBasedPolicy
* timeBased.lookbackTime = 5d
* }
* version : {
* finder.class=org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder
* pattern="hourly/*/"
* }
* },
* {
* selection {
* policy.class = data.management.policy.SelectBeforeTimeBasedPolicy
* timeBased.lookbackTime = 20d
* }
* version : {
* finder.class=org.apache.gobblin.data.management.version.finder.DateTimeDatasetVersionFinder
* pattern="daily/*/"
* }
* }
* ]
* }
*
*
* </pre>
*/
public static final String DATASET_PARTITIONS_LIST_KEY = CONFIGURATION_KEY_PREFIX + "dataset.partitions";
/**
* An option that allows user to create an alias of the pair (policy.class,version.finder.class)
* Mainly for simplifying configStore (e.g. in HDFS) file hierarchy.
*/
public static final String DATASET_VERSION_POLICY_ALIAS = CONFIGURATION_KEY_PREFIX + "versionAndPolicy.alias";
private final Path datasetRoot;
private final List<VersionFinderAndPolicy<T>> versionFindersAndPolicies;
/**
* A set of all available {@link RetentionActionFactory}s
*/
private static final Set<Class<? extends RetentionActionFactory>> RETENTION_ACTION_TYPES;
static {
RETENTION_ACTION_TYPES = ImmutableSet.<Class<? extends RetentionActionFactory>>of(MultiAccessControlActionFactory.class);
}
/**
* Creates a new ConfigurableCleanableDataset configured through gobblin-config-management. The constructor expects
* {@link #VERSION_FINDER_CLASS_KEY} and {@link #RETENTION_POLICY_CLASS_KEY} to be available in the
* <code>config</code> passed.
*/
public ConfigurableCleanableDataset(FileSystem fs, Properties jobProps, Path datasetRoot, Config config, Logger log)
throws IOException {
super(fs, jobProps, config, log);
this.datasetRoot = datasetRoot;
this.versionFindersAndPolicies = Lists.newArrayList();
if (config.hasPath(DATASET_VERSION_POLICY_ALIAS)) {
initWithSelectionPolicy(config.getConfig(DATASET_VERSION_POLICY_ALIAS), jobProps);
} else if (config.hasPath(VERSION_FINDER_CLASS_KEY) && config.hasPath(RETENTION_POLICY_CLASS_KEY)) {
initWithRetentionPolicy(config, jobProps, RETENTION_POLICY_CLASS_KEY, VERSION_FINDER_CLASS_KEY);
} else if (config.hasPath(VERSION_FINDER_CLASS_KEY)) {
initWithSelectionPolicy(config.getConfig(RETENTION_CONFIGURATION_KEY), jobProps);
} else if (config.hasPath(DATASET_PARTITIONS_LIST_KEY)) {
List<? extends Config> versionAndPolicies = config.getConfigList(DATASET_PARTITIONS_LIST_KEY);
for (Config versionAndPolicy : versionAndPolicies) {
initWithSelectionPolicy(versionAndPolicy, jobProps);
}
} else {
throw new IllegalArgumentException(
String.format("Either set version finder at %s and retention policy at %s or set partitions at %s",
VERSION_FINDER_CLASS_KEY, RETENTION_POLICY_CLASS_KEY, DATASET_PARTITIONS_LIST_KEY));
}
}
public ConfigurableCleanableDataset(FileSystem fs, Properties props, Path datasetRoot) throws IOException {
this(fs, props, datasetRoot, LoggerFactory.getLogger(ConfigurableCleanableDataset.class));
}
public ConfigurableCleanableDataset(FileSystem fs, Properties props, Path datasetRoot, Logger log)
throws IOException {
this(fs, props, datasetRoot, ConfigFactory.parseProperties(props), log);
}
@Override
public Path datasetRoot() {
return this.datasetRoot;
}
@Override
public List<VersionFinderAndPolicy<T>> getVersionFindersAndPolicies() {
return this.versionFindersAndPolicies;
}
private void initWithRetentionPolicy(Config config, Properties jobProps, String retentionPolicyKey,
String versionFinderKey) {
this.versionFindersAndPolicies
.add(new VersionFinderAndPolicy<>(createRetentionPolicy(config.getString(retentionPolicyKey), config, jobProps),
createVersionFinder(config.getString(versionFinderKey), config, jobProps), config));
}
private void initWithSelectionPolicy(Config config, Properties jobProps) {
String selectionPolicyKey = StringUtils.substringAfter(SELECTION_POLICY_CLASS_KEY, CONFIGURATION_KEY_PREFIX);
String versionFinderKey = StringUtils.substringAfter(VERSION_FINDER_CLASS_KEY, CONFIGURATION_KEY_PREFIX);
Preconditions.checkArgument(
config.hasPath(versionFinderKey),
String.format("Version finder class is required at %s in config %s", versionFinderKey,
config.root().render(ConfigRenderOptions.concise())));
VersionFinderAndPolicyBuilder<T> builder = VersionFinderAndPolicy.builder();
builder.config(config);
builder.versionFinder(createVersionFinder(config.getString(versionFinderKey), config, jobProps));
if (config.hasPath(selectionPolicyKey)) {
builder.versionSelectionPolicy(createSelectionPolicy(
ConfigUtils.getString(config, selectionPolicyKey, SelectNothingPolicy.class.getName()), config, jobProps));
}
for (Class<? extends RetentionActionFactory> factoryClass : RETENTION_ACTION_TYPES) {
try {
RetentionActionFactory factory = factoryClass.newInstance();
if (factory.canCreateWithConfig(config)) {
builder.retentionAction((RetentionAction) factory.createRetentionAction(config, this.fs,
ConfigUtils.propertiesToConfig(jobProps)));
}
} catch (InstantiationException | IllegalAccessException e) {
Throwables.propagate(e);
}
}
this.versionFindersAndPolicies.add(builder.build());
}
@SuppressWarnings("unchecked")
private VersionFinder<? extends T> createVersionFinder(String className, Config config, Properties jobProps) {
try {
return (VersionFinder<? extends T>) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(className),
ImmutableList.<Object> of(this.fs, config), ImmutableList.<Object> of(this.fs, jobProps));
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
| ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
@SuppressWarnings("unchecked")
private RetentionPolicy<T> createRetentionPolicy(String className, Config config, Properties jobProps) {
try {
return (RetentionPolicy<T>) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(className),
ImmutableList.<Object> of(config), ImmutableList.<Object> of(config, jobProps),
ImmutableList.<Object> of(jobProps));
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
| ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
@SuppressWarnings("unchecked")
private VersionSelectionPolicy<T> createSelectionPolicy(String className, Config config, Properties jobProps) {
try {
this.log.debug(String.format("Configuring selection policy %s for %s with %s", className, this.datasetRoot,
config.root().render(ConfigRenderOptions.concise())));
return (VersionSelectionPolicy<T>) GobblinConstructorUtils.invokeFirstConstructor(Class.forName(className),
ImmutableList.<Object> of(config), ImmutableList.<Object> of(config, jobProps),
ImmutableList.<Object> of(jobProps));
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
| ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
}
| 2,397 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/CleanableDatasetBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import com.google.common.collect.ImmutableList;
import com.typesafe.config.Config;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.version.FileSystemDatasetVersion;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* Implementation of a {@link CleanableDataset} that uses a
* {@link org.apache.gobblin.data.management.retention.version.finder.VersionFinder} to find dataset versions, a
* {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy} to figure out deletable versions, and then deletes
* those files and newly empty parent directories.
*
* <p>
* Concrete subclasses should implement {@link #getVersionFinder} and {@link #getRetentionPolicy}.
* </p>
*
* <p>
* Datasets are directories in the filesystem containing data files organized in version-like directory structures.
* Example datasets:
* </p>
*
* <p>
* For snapshot based datasets, with the directory structure:
* <pre>
* /path/to/table/
* snapshot1/
* dataFiles...
* snapshot2/
* dataFiles...
* </pre>
* each of snapshot1 and snapshot2 are dataset versions.
* </p>
*
* <p>
* For tracking datasets, with the directory structure:
* <pre>
* /path/to/tracking/data/
* 2015/
* 06/
* 01/
* dataFiles...
* 02/
* dataFiles...
* </pre>
* each of 2015/06/01 and 2015/06/02 are dataset versions.
* </p>
*
* <p>
* {@link CleanableDatasetBase} uses a {@link org.apache.gobblin.data.management.version.finder.DatasetVersionFinder} to find all
* subdirectories that are versions of this dataset. After that, for each dataset, it uses a
* {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy} to decide which versions of the dataset should be
* deleted. For each version deleted, if {@link #deleteEmptyDirectories} it will also look at all parent directories
* and delete directories that are now empty, up to but not including the dataset root.
* </p>
*
* @param <T> type of {@link org.apache.gobblin.data.management.retention.version.DatasetVersion} supported by this
* {@link CleanableDataset}.
*/
public abstract class CleanableDatasetBase<T extends FileSystemDatasetVersion>
    extends MultiVersionCleanableDatasetBase<T> implements CleanableDataset, FileSystemDataset {
  /**
   * Get {@link org.apache.gobblin.data.management.retention.version.finder.VersionFinder} to use.
   */
  public abstract VersionFinder<? extends T> getVersionFinder();
  /**
   * Get {@link org.apache.gobblin.data.management.retention.policy.RetentionPolicy} to use.
   */
  public abstract RetentionPolicy<T> getRetentionPolicy();
  /**
   * Creates the dataset with retention settings read from both <code>props</code> and <code>config</code>.
   */
  public CleanableDatasetBase(final FileSystem fs, final Properties props, Config config, Logger log)
      throws IOException {
    super(fs, props, config, log);
  }
  /**
   * Creates the dataset with retention settings read from <code>props</code> only.
   */
  public CleanableDatasetBase(final FileSystem fs, final Properties props, Logger log) throws IOException {
    super(fs, props, log);
  }
  /**
   * Creates the dataset with fully explicit retention flags, including the blacklist flag.
   */
  public CleanableDatasetBase(FileSystem fs, Properties properties, boolean simulate, boolean skipTrash,
      boolean deleteEmptyDirectories, boolean deleteAsOwner, boolean isDatasetBlacklisted, Logger log)
      throws IOException {
    super(fs, properties, simulate, skipTrash, deleteEmptyDirectories, deleteAsOwner, isDatasetBlacklisted, log);
  }
  /**
   * Creates the dataset with explicit retention flags; the blacklist flag falls back to
   * {@link #IS_DATASET_BLACKLISTED_DEFAULT}.
   */
  public CleanableDatasetBase(FileSystem fs, Properties properties, boolean simulate, boolean skipTrash,
      boolean deleteEmptyDirectories, boolean deleteAsOwner, Logger log) throws IOException {
    super(fs, properties, simulate, skipTrash, deleteEmptyDirectories, deleteAsOwner,
        Boolean.parseBoolean(IS_DATASET_BLACKLISTED_DEFAULT), log);
  }
  /**
   * Adapts the single-version-finder/policy API of this class to the multi-version API of the parent by
   * wrapping {@link #getVersionFinder()} and {@link #getRetentionPolicy()} in a one-element list.
   */
  @Override
  public List<VersionFinderAndPolicy<T>> getVersionFindersAndPolicies() {
    return ImmutableList
        .<VersionFinderAndPolicy<T>> of(new VersionFinderAndPolicy<>(getRetentionPolicy(), getVersionFinder(),
            ConfigFactory.empty()));
  }
}
| 2,398 |
0 | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention | Create_ds/gobblin/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/dataset/ModificationTimeDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.retention.dataset;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.data.management.retention.policy.RetentionPolicy;
import org.apache.gobblin.data.management.retention.policy.TimeBasedRetentionPolicy;
import org.apache.gobblin.data.management.version.TimestampedDatasetVersion;
import org.apache.gobblin.data.management.version.finder.ModDateTimeDatasetVersionFinder;
import org.apache.gobblin.data.management.version.finder.VersionFinder;
/**
* {@link CleanableDatasetBase} for a modification time based dataset.
*
* Uses a {@link ModDateTimeDatasetVersionFinder} and a {@link TimeBasedRetentionPolicy}.
*/
public class ModificationTimeDataset extends CleanableDatasetBase<TimestampedDatasetVersion> {

  private final Path datasetRoot;
  private final VersionFinder<TimestampedDatasetVersion> versionFinder;
  private final RetentionPolicy<TimestampedDatasetVersion> retentionPolicy;

  public ModificationTimeDataset(FileSystem fs, Properties props, Path datasetRoot) throws IOException {
    this(fs, props, datasetRoot, LoggerFactory.getLogger(ModificationTimeDataset.class));
  }

  /**
   * Creates a dataset rooted at {@code datasetRoot} whose versions are identified by modification
   * time via a {@link ModDateTimeDatasetVersionFinder} and retained per a {@link TimeBasedRetentionPolicy}.
   */
  public ModificationTimeDataset(FileSystem fs, Properties props, Path datasetRoot, Logger log) throws IOException {
    super(fs, props, log);
    this.datasetRoot = datasetRoot;
    this.versionFinder = new ModDateTimeDatasetVersionFinder(fs, props);
    this.retentionPolicy = new TimeBasedRetentionPolicy(props);
  }

  @Override
  public VersionFinder<? extends TimestampedDatasetVersion> getVersionFinder() {
    return this.versionFinder;
  }

  @Override
  public RetentionPolicy<TimestampedDatasetVersion> getRetentionPolicy() {
    return this.retentionPolicy;
  }

  @Override
  public Path datasetRoot() {
    return this.datasetRoot;
  }
}
| 2,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.