comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
I think you only want to fallback to minor version if majorVersions are not equal maybe easier to use Compator.comparingInt(...).thenComparing(...) to do the work | boolean isGreaterThan(OTelVersion oTelVersion) {
if (this.oTelVersionAsString.equals(oTelVersion.oTelVersionAsString)) {
return false;
}
if (this.majorVersion > oTelVersion.majorVersion) {
return true;
}
if (this.minorVersionVersion > oTelVersion.minorVersionVersion) {
return true;
}
if (this.subMinorVersion > oTelVersion.subMinorVersion) {
return true;
}
return false;
} | } | boolean isGreaterThan(OTelVersion oTelVersion) {
if (this.otelVersionAsString.equals(oTelVersion.otelVersionAsString)) {
return false;
}
if (this.majorVersion > oTelVersion.majorVersion) {
return true;
}
if (this.minorVersion > oTelVersion.minorVersion) {
return true;
}
if (this.patchVersion > oTelVersion.patchVersion) {
return true;
}
return false;
} | class OTelVersion {
private final String oTelVersionAsString;
private final int majorVersion;
private final int minorVersionVersion;
private final int subMinorVersion;
OTelVersion(String oTelVersionAsString) {
this.oTelVersionAsString = oTelVersionAsString;
String[] versionComponents = oTelVersionAsString.split("\\.");
this.majorVersion = Integer.parseInt(versionComponents[0]);
this.minorVersionVersion = Integer.parseInt(versionComponents[1]);
this.subMinorVersion = Integer.parseInt(versionComponents[2]);
}
boolean isLessThan(OTelVersion oTelVersion) {
if (this.oTelVersionAsString.equals(oTelVersion.oTelVersionAsString)) {
return false;
}
return !isGreaterThan(oTelVersion);
}
} | class OTelVersion {
private final String otelVersionAsString;
private final int majorVersion;
private final int minorVersion;
private final int patchVersion;
OTelVersion(String otelVersionAsString) {
this.otelVersionAsString = otelVersionAsString;
String[] versionComponents = otelVersionAsString.split("\\.");
this.majorVersion = Integer.parseInt(versionComponents[0]);
this.minorVersion = Integer.parseInt(versionComponents[1]);
this.patchVersion = Integer.parseInt(versionComponents[2]);
}
boolean isLessThan(OTelVersion oTelVersion) {
if (this.otelVersionAsString.equals(oTelVersion.otelVersionAsString)) {
return false;
}
return !isGreaterThan(oTelVersion);
}
boolean hasSameMajorVersionAs(OTelVersion oTelVersion) {
return this.majorVersion == oTelVersion.majorVersion;
}
} |
with `StepVerifier` we don't need to use block(), you can test using the reactive calls directly. This is not a blocker, but something to be improved in a follow up PR. | public void testBulkShouldFailIfEtagDoesNotMatch() {
Flux<CourseWithEtag> insertedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(Flux.just(createCourseWithEtag()));
List<CourseWithEtag> insertedCourseWithEtag = insertedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(insertedCourseWithEtag.size(), 1);
insertedCourseWithEtag.get(0).setName("CHANGED");
Flux<CourseWithEtag> updatedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtag);
List<CourseWithEtag> updatedCourseWithEtag = updatedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(updatedCourseWithEtag.size(), 1);
updatedCourseWithEtag.get(0).setEtag(insertedCourseWithEtag.get(0).getEtag());
Flux<CourseWithEtag> courseFlux = reactiveCourseWithEtagRepository.saveAll(updatedCourseWithEtag);
StepVerifier.create(courseFlux);
Assert.assertEquals(courseFlux.collectList().block().size(), 0);
reactiveCourseWithEtagRepository.deleteAll(updatedCourseWithEtag);
Flux<CourseWithEtag> courseFlux2 = reactiveCourseWithEtagRepository.findAll();
StepVerifier.create(courseFlux2);
Assert.assertEquals(courseFlux2.collectList().block().size(), 1);
} | Assert.assertEquals(courseFlux.collectList().block().size(), 0); | public void testBulkShouldFailIfEtagDoesNotMatch() {
Flux<CourseWithEtag> insertedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(Flux.just(createCourseWithEtag()));
List<CourseWithEtag> insertedCourseWithEtag = insertedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(insertedCourseWithEtag.size(), 1);
insertedCourseWithEtag.get(0).setName("CHANGED");
Flux<CourseWithEtag> updatedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtag);
List<CourseWithEtag> updatedCourseWithEtag = updatedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(updatedCourseWithEtag.size(), 1);
updatedCourseWithEtag.get(0).setEtag(insertedCourseWithEtag.get(0).getEtag());
Flux<CourseWithEtag> courseFlux = reactiveCourseWithEtagRepository.saveAll(updatedCourseWithEtag);
StepVerifier.create(courseFlux);
Assert.assertEquals(courseFlux.collectList().block().size(), 0);
reactiveCourseWithEtagRepository.deleteAll(updatedCourseWithEtag);
Flux<CourseWithEtag> courseFlux2 = reactiveCourseWithEtagRepository.findAll();
StepVerifier.create(courseFlux2);
Assert.assertEquals(courseFlux2.collectList().block().size(), 1);
} | class ReactiveEtagIT {
@ClassRule
public static final ReactiveIntegrationTestCollectionManager collectionManager = new ReactiveIntegrationTestCollectionManager();
@Autowired
ReactiveCosmosTemplate template;
@Autowired
ReactiveCourseWithEtagRepository reactiveCourseWithEtagRepository;
@Before
public void setup() {
collectionManager.ensureContainersCreatedAndEmpty(template, CourseWithEtag.class);
}
private static CourseWithEtag createCourseWithEtag() {
return new CourseWithEtag(UUID.randomUUID().toString(), COURSE_NAME, DEPARTMENT);
}
@Test
public void testCrudOperationsShouldApplyEtag() {
final Mono<CourseWithEtag> insertedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
Assert.assertNotNull(insertedCourseWithEtag.getEtag());
insertedCourseWithEtag.setName("CHANGED");
final Mono<CourseWithEtag> updatedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(updatedCourseWithEtag.getEtag(), insertedCourseWithEtag.getEtag());
final Mono<CourseWithEtag> foundCourseWithEtagMono =
reactiveCourseWithEtagRepository.findById(insertedCourseWithEtag.getCourseId());
CourseWithEtag foundCourseWithEtag = foundCourseWithEtagMono.block();
Assert.assertNotNull(foundCourseWithEtag);
Assert.assertNotNull(foundCourseWithEtag.getEtag());
Assert.assertEquals(foundCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
@Test
public void testCrudListOperationsShouldApplyEtag() {
final List<CourseWithEtag> courses = new ArrayList<>();
courses.add(createCourseWithEtag());
courses.add(createCourseWithEtag());
final Flux<CourseWithEtag> insertedCourseWithEtagsFlux = reactiveCourseWithEtagRepository.saveAll(courses);
List<CourseWithEtag> insertedCourseWithEtags = insertedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(insertedCourseWithEtags);
insertedCourseWithEtags.forEach(course -> Assert.assertNotNull(course.getEtag()));
insertedCourseWithEtags.forEach(course -> course.setName("CHANGED"));
final Flux<CourseWithEtag> updatedCourseWithEtagsFlux =
reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtags);
List<CourseWithEtag> updatedCourseWithEtags = updatedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(updatedCourseWithEtags);
insertedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
updatedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
for (int i = 0; i < updatedCourseWithEtags.size(); i++) {
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtags.get(i);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtags.get(i);
Assert.assertEquals(insertedCourseWithEtag.getCourseId(), updatedCourseWithEtag.getCourseId());
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(insertedCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
}
@Test
public void testShouldFailIfEtagDoesNotMatch() {
Mono<CourseWithEtag> insertedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
insertedCourseWithEtag.setName("CHANGED");
Mono<CourseWithEtag> updatedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
updatedCourseWithEtag.setEtag(insertedCourseWithEtag.getEtag());
Mono<CourseWithEtag> courseMono = reactiveCourseWithEtagRepository.save(updatedCourseWithEtag);
StepVerifier.create(courseMono).verifyError(CosmosAccessException.class);
Mono<Void> deleteMono = reactiveCourseWithEtagRepository.delete(updatedCourseWithEtag);
StepVerifier.create(deleteMono).verifyError(CosmosAccessException.class);
}
@Test
} | class ReactiveEtagIT {
@ClassRule
public static final ReactiveIntegrationTestCollectionManager collectionManager = new ReactiveIntegrationTestCollectionManager();
@Autowired
ReactiveCosmosTemplate template;
@Autowired
ReactiveCourseWithEtagRepository reactiveCourseWithEtagRepository;
@Before
public void setup() {
collectionManager.ensureContainersCreatedAndEmpty(template, CourseWithEtag.class);
}
private static CourseWithEtag createCourseWithEtag() {
return new CourseWithEtag(UUID.randomUUID().toString(), COURSE_NAME, DEPARTMENT);
}
@Test
public void testCrudOperationsShouldApplyEtag() {
final Mono<CourseWithEtag> insertedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
Assert.assertNotNull(insertedCourseWithEtag.getEtag());
insertedCourseWithEtag.setName("CHANGED");
final Mono<CourseWithEtag> updatedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(updatedCourseWithEtag.getEtag(), insertedCourseWithEtag.getEtag());
final Mono<CourseWithEtag> foundCourseWithEtagMono =
reactiveCourseWithEtagRepository.findById(insertedCourseWithEtag.getCourseId());
CourseWithEtag foundCourseWithEtag = foundCourseWithEtagMono.block();
Assert.assertNotNull(foundCourseWithEtag);
Assert.assertNotNull(foundCourseWithEtag.getEtag());
Assert.assertEquals(foundCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
@Test
public void testCrudListOperationsShouldApplyEtag() {
final List<CourseWithEtag> courses = new ArrayList<>();
courses.add(createCourseWithEtag());
courses.add(createCourseWithEtag());
final Flux<CourseWithEtag> insertedCourseWithEtagsFlux = reactiveCourseWithEtagRepository.saveAll(courses);
List<CourseWithEtag> insertedCourseWithEtags = insertedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(insertedCourseWithEtags);
insertedCourseWithEtags.forEach(course -> Assert.assertNotNull(course.getEtag()));
insertedCourseWithEtags.forEach(course -> course.setName("CHANGED"));
final Flux<CourseWithEtag> updatedCourseWithEtagsFlux =
reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtags);
List<CourseWithEtag> updatedCourseWithEtags = updatedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(updatedCourseWithEtags);
insertedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
updatedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
for (int i = 0; i < updatedCourseWithEtags.size(); i++) {
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtags.get(i);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtags.get(i);
Assert.assertEquals(insertedCourseWithEtag.getCourseId(), updatedCourseWithEtag.getCourseId());
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(insertedCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
}
@Test
public void testShouldFailIfEtagDoesNotMatch() {
Mono<CourseWithEtag> insertedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
insertedCourseWithEtag.setName("CHANGED");
Mono<CourseWithEtag> updatedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
updatedCourseWithEtag.setEtag(insertedCourseWithEtag.getEtag());
Mono<CourseWithEtag> courseMono = reactiveCourseWithEtagRepository.save(updatedCourseWithEtag);
StepVerifier.create(courseMono).verifyError(CosmosAccessException.class);
Mono<Void> deleteMono = reactiveCourseWithEtagRepository.delete(updatedCourseWithEtag);
StepVerifier.create(deleteMono).verifyError(CosmosAccessException.class);
}
@Test
} |
I see you have followed the correct usage of `StepVerifier` in the below `ReactiveRoleRepositoryIT` test class. | public void testBulkShouldFailIfEtagDoesNotMatch() {
Flux<CourseWithEtag> insertedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(Flux.just(createCourseWithEtag()));
List<CourseWithEtag> insertedCourseWithEtag = insertedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(insertedCourseWithEtag.size(), 1);
insertedCourseWithEtag.get(0).setName("CHANGED");
Flux<CourseWithEtag> updatedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtag);
List<CourseWithEtag> updatedCourseWithEtag = updatedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(updatedCourseWithEtag.size(), 1);
updatedCourseWithEtag.get(0).setEtag(insertedCourseWithEtag.get(0).getEtag());
Flux<CourseWithEtag> courseFlux = reactiveCourseWithEtagRepository.saveAll(updatedCourseWithEtag);
StepVerifier.create(courseFlux);
Assert.assertEquals(courseFlux.collectList().block().size(), 0);
reactiveCourseWithEtagRepository.deleteAll(updatedCourseWithEtag);
Flux<CourseWithEtag> courseFlux2 = reactiveCourseWithEtagRepository.findAll();
StepVerifier.create(courseFlux2);
Assert.assertEquals(courseFlux2.collectList().block().size(), 1);
} | Assert.assertEquals(courseFlux.collectList().block().size(), 0); | public void testBulkShouldFailIfEtagDoesNotMatch() {
Flux<CourseWithEtag> insertedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(Flux.just(createCourseWithEtag()));
List<CourseWithEtag> insertedCourseWithEtag = insertedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(insertedCourseWithEtag.size(), 1);
insertedCourseWithEtag.get(0).setName("CHANGED");
Flux<CourseWithEtag> updatedCourseWithEtagFlux = reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtag);
List<CourseWithEtag> updatedCourseWithEtag = updatedCourseWithEtagFlux.collectList().block();
Assert.assertEquals(updatedCourseWithEtag.size(), 1);
updatedCourseWithEtag.get(0).setEtag(insertedCourseWithEtag.get(0).getEtag());
Flux<CourseWithEtag> courseFlux = reactiveCourseWithEtagRepository.saveAll(updatedCourseWithEtag);
StepVerifier.create(courseFlux);
Assert.assertEquals(courseFlux.collectList().block().size(), 0);
reactiveCourseWithEtagRepository.deleteAll(updatedCourseWithEtag);
Flux<CourseWithEtag> courseFlux2 = reactiveCourseWithEtagRepository.findAll();
StepVerifier.create(courseFlux2);
Assert.assertEquals(courseFlux2.collectList().block().size(), 1);
} | class ReactiveEtagIT {
@ClassRule
public static final ReactiveIntegrationTestCollectionManager collectionManager = new ReactiveIntegrationTestCollectionManager();
@Autowired
ReactiveCosmosTemplate template;
@Autowired
ReactiveCourseWithEtagRepository reactiveCourseWithEtagRepository;
@Before
public void setup() {
collectionManager.ensureContainersCreatedAndEmpty(template, CourseWithEtag.class);
}
private static CourseWithEtag createCourseWithEtag() {
return new CourseWithEtag(UUID.randomUUID().toString(), COURSE_NAME, DEPARTMENT);
}
@Test
public void testCrudOperationsShouldApplyEtag() {
final Mono<CourseWithEtag> insertedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
Assert.assertNotNull(insertedCourseWithEtag.getEtag());
insertedCourseWithEtag.setName("CHANGED");
final Mono<CourseWithEtag> updatedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(updatedCourseWithEtag.getEtag(), insertedCourseWithEtag.getEtag());
final Mono<CourseWithEtag> foundCourseWithEtagMono =
reactiveCourseWithEtagRepository.findById(insertedCourseWithEtag.getCourseId());
CourseWithEtag foundCourseWithEtag = foundCourseWithEtagMono.block();
Assert.assertNotNull(foundCourseWithEtag);
Assert.assertNotNull(foundCourseWithEtag.getEtag());
Assert.assertEquals(foundCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
@Test
public void testCrudListOperationsShouldApplyEtag() {
final List<CourseWithEtag> courses = new ArrayList<>();
courses.add(createCourseWithEtag());
courses.add(createCourseWithEtag());
final Flux<CourseWithEtag> insertedCourseWithEtagsFlux = reactiveCourseWithEtagRepository.saveAll(courses);
List<CourseWithEtag> insertedCourseWithEtags = insertedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(insertedCourseWithEtags);
insertedCourseWithEtags.forEach(course -> Assert.assertNotNull(course.getEtag()));
insertedCourseWithEtags.forEach(course -> course.setName("CHANGED"));
final Flux<CourseWithEtag> updatedCourseWithEtagsFlux =
reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtags);
List<CourseWithEtag> updatedCourseWithEtags = updatedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(updatedCourseWithEtags);
insertedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
updatedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
for (int i = 0; i < updatedCourseWithEtags.size(); i++) {
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtags.get(i);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtags.get(i);
Assert.assertEquals(insertedCourseWithEtag.getCourseId(), updatedCourseWithEtag.getCourseId());
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(insertedCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
}
@Test
public void testShouldFailIfEtagDoesNotMatch() {
Mono<CourseWithEtag> insertedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
insertedCourseWithEtag.setName("CHANGED");
Mono<CourseWithEtag> updatedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
updatedCourseWithEtag.setEtag(insertedCourseWithEtag.getEtag());
Mono<CourseWithEtag> courseMono = reactiveCourseWithEtagRepository.save(updatedCourseWithEtag);
StepVerifier.create(courseMono).verifyError(CosmosAccessException.class);
Mono<Void> deleteMono = reactiveCourseWithEtagRepository.delete(updatedCourseWithEtag);
StepVerifier.create(deleteMono).verifyError(CosmosAccessException.class);
}
@Test
} | class ReactiveEtagIT {
@ClassRule
public static final ReactiveIntegrationTestCollectionManager collectionManager = new ReactiveIntegrationTestCollectionManager();
@Autowired
ReactiveCosmosTemplate template;
@Autowired
ReactiveCourseWithEtagRepository reactiveCourseWithEtagRepository;
@Before
public void setup() {
collectionManager.ensureContainersCreatedAndEmpty(template, CourseWithEtag.class);
}
private static CourseWithEtag createCourseWithEtag() {
return new CourseWithEtag(UUID.randomUUID().toString(), COURSE_NAME, DEPARTMENT);
}
@Test
public void testCrudOperationsShouldApplyEtag() {
final Mono<CourseWithEtag> insertedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
Assert.assertNotNull(insertedCourseWithEtag.getEtag());
insertedCourseWithEtag.setName("CHANGED");
final Mono<CourseWithEtag> updatedCourseWithEtagMono =
reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(updatedCourseWithEtag.getEtag(), insertedCourseWithEtag.getEtag());
final Mono<CourseWithEtag> foundCourseWithEtagMono =
reactiveCourseWithEtagRepository.findById(insertedCourseWithEtag.getCourseId());
CourseWithEtag foundCourseWithEtag = foundCourseWithEtagMono.block();
Assert.assertNotNull(foundCourseWithEtag);
Assert.assertNotNull(foundCourseWithEtag.getEtag());
Assert.assertEquals(foundCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
@Test
public void testCrudListOperationsShouldApplyEtag() {
final List<CourseWithEtag> courses = new ArrayList<>();
courses.add(createCourseWithEtag());
courses.add(createCourseWithEtag());
final Flux<CourseWithEtag> insertedCourseWithEtagsFlux = reactiveCourseWithEtagRepository.saveAll(courses);
List<CourseWithEtag> insertedCourseWithEtags = insertedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(insertedCourseWithEtags);
insertedCourseWithEtags.forEach(course -> Assert.assertNotNull(course.getEtag()));
insertedCourseWithEtags.forEach(course -> course.setName("CHANGED"));
final Flux<CourseWithEtag> updatedCourseWithEtagsFlux =
reactiveCourseWithEtagRepository.saveAll(insertedCourseWithEtags);
List<CourseWithEtag> updatedCourseWithEtags = updatedCourseWithEtagsFlux.collectList().block();
Assert.assertNotNull(updatedCourseWithEtags);
insertedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
updatedCourseWithEtags.sort(Comparator.comparing(CourseWithEtag::getCourseId));
for (int i = 0; i < updatedCourseWithEtags.size(); i++) {
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtags.get(i);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtags.get(i);
Assert.assertEquals(insertedCourseWithEtag.getCourseId(), updatedCourseWithEtag.getCourseId());
Assert.assertNotNull(updatedCourseWithEtag.getEtag());
Assert.assertNotEquals(insertedCourseWithEtag.getEtag(), updatedCourseWithEtag.getEtag());
}
}
@Test
public void testShouldFailIfEtagDoesNotMatch() {
Mono<CourseWithEtag> insertedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(createCourseWithEtag());
CourseWithEtag insertedCourseWithEtag = insertedCourseWithEtagMono.block();
Assert.assertNotNull(insertedCourseWithEtag);
insertedCourseWithEtag.setName("CHANGED");
Mono<CourseWithEtag> updatedCourseWithEtagMono = reactiveCourseWithEtagRepository.save(insertedCourseWithEtag);
CourseWithEtag updatedCourseWithEtag = updatedCourseWithEtagMono.block();
Assert.assertNotNull(updatedCourseWithEtag);
updatedCourseWithEtag.setEtag(insertedCourseWithEtag.getEtag());
Mono<CourseWithEtag> courseMono = reactiveCourseWithEtagRepository.save(updatedCourseWithEtag);
StepVerifier.create(courseMono).verifyError(CosmosAccessException.class);
Mono<Void> deleteMono = reactiveCourseWithEtagRepository.delete(updatedCourseWithEtag);
StepVerifier.create(deleteMono).verifyError(CosmosAccessException.class);
}
@Test
} |
`UserAgentPolicy` is a per-call policy. However, our string replacement code should check that this only added once. | private HttpPipeline getHttpPipeline() {
CredentialValidator.validateSingleCredentialIsPresent(
storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
boolean decryptionPolicyPresent = false;
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
if (currPolicy instanceof BlobDecryptionPolicy) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
+ " configured for encryption/decryption in a way that might conflict with the passed key "
+ "information. Please ensure that the passed pipeline is not already configured for "
+ "encryption/decryption"));
}
policies.add(currPolicy);
}
policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));
return new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.tracer(httpPipeline.getTracer())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));
String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId()
: logOptions.getApplicationId();
String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration);
policies.add(new UserAgentPolicy(modifiedUserAgent));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));
policies.add(new AddDatePolicy());
HttpHeaders headers = new HttpHeaders();
clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
if (headers.getSize() > 0) {
policies.add(new AddHeadersPolicy(headers));
}
policies.add(new MetadataValidationPolicy());
if (storageSharedKeyCredential != null) {
policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
} else if (tokenCredential != null) {
BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
} else if (azureSasCredential != null) {
policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
} else if (sasToken != null) {
policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
}
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(createTracer(clientOptions))
.build();
} | policies.add(new UserAgentPolicy(modifiedUserAgent)); | private HttpPipeline getHttpPipeline() {
CredentialValidator.validateSingleCredentialIsPresent(
storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
boolean decryptionPolicyPresent = false;
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
if (currPolicy instanceof BlobDecryptionPolicy) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
+ " configured for encryption/decryption in a way that might conflict with the passed key "
+ "information. Please ensure that the passed pipeline is not already configured for "
+ "encryption/decryption"));
}
policies.add(currPolicy);
}
policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));
return new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.tracer(httpPipeline.getTracer())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));
String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId()
: logOptions.getApplicationId();
String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration);
policies.add(new UserAgentPolicy(modifiedUserAgent));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));
policies.add(new AddDatePolicy());
HttpHeaders headers = new HttpHeaders();
clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
if (headers.getSize() > 0) {
policies.add(new AddHeadersPolicy(headers));
}
policies.add(new MetadataValidationPolicy());
if (storageSharedKeyCredential != null) {
policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
} else if (tokenCredential != null) {
BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
} else if (azureSasCredential != null) {
policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
} else if (sasToken != null) {
policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
}
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(createTracer(clientOptions))
.build();
} | class EncryptedBlobClientBuilder implements
TokenCredentialTrait<EncryptedBlobClientBuilder>,
ConnectionStringTrait<EncryptedBlobClientBuilder>,
AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
AzureSasCredentialTrait<EncryptedBlobClientBuilder>,
HttpTrait<EncryptedBlobClientBuilder>,
ConfigurationTrait<EncryptedBlobClientBuilder>,
EndpointTrait<EncryptedBlobClientBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
private static final String USER_AGENT_MODIFICATION_REGEX =
"(.*? )?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private String versionId;
private boolean requiresEncryption;
private final EncryptionVersion encryptionVersion;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private AzureSasCredential azureSasCredential;
private String sasToken;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions;
private RetryOptions coreRetryOptions;
private HttpPipeline httpPipeline;
private ClientOptions clientOptions = new ClientOptions();
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
private EncryptionScope encryptionScope;
/**
 * Creates a new instance of the EncryptedBlobClientBuilder using client-side encryption v1.
 *
 * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} and
 * prefer {@code EncryptionVersion.V2}; v1 is no longer considered secure.
 */
@Deprecated
public EncryptedBlobClientBuilder() {
    logOptions = getDefaultHttpLogOptions();
    // V1 remains the default only for backwards compatibility; warn loudly so callers migrate to V2.
    this.encryptionVersion = EncryptionVersion.V1;
    // Fixed: the original segments concatenated as "highlyrecommended" (missing space).
    LOGGER.warning("Client is being configured to use v1 of client side encryption, "
        + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is "
        + "highly recommended the version be set to v2 using the constructor");
}
/**
 * Creates a new instance of the EncryptedBlobClientBuilder.
 *
 * @param version The version of the client side encryption protocol to use. It is highly recommended that v2 be
 * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a
 * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
 * @throws NullPointerException If {@code version} is {@code null}.
 */
public EncryptedBlobClientBuilder(EncryptionVersion version) {
    Objects.requireNonNull(version);
    logOptions = getDefaultHttpLogOptions();
    this.encryptionVersion = version;
    if (EncryptionVersion.V1.equals(this.encryptionVersion)) {
        // Fixed: the original segments concatenated as "highlyrecommended" (missing space).
        // NOTE(review): the "default is v1" wording is odd here since the caller chose v1 explicitly;
        // kept for consistency with the deprecated no-arg constructor — confirm intended wording.
        LOGGER.warning("Client is being configured to use v1 of client side encryption, "
            + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is "
            + "highly recommended the version be set to v2 using the constructor");
    }
}
/**
* Creates a {@link EncryptedBlobClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
* <pre>
* EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder&
* .key&
* .keyResolver&
* .connectionString&
* .containerName&
* .blobName&
* .buildEncryptedBlobAsyncClient&
* </pre>
* <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
*
* @return a {@link EncryptedBlobClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
* @throws IllegalStateException If multiple credentials have been specified.
* @throws IllegalStateException If both {@link
* and {@link
*/
public EncryptedBlobClient buildEncryptedBlobClient() {
    // The synchronous client is a thin wrapper over the async client built from the same configuration.
    EncryptedBlobAsyncClient asyncClient = buildEncryptedBlobAsyncClient();
    return new EncryptedBlobClient(asyncClient);
}
/**
* Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
* <pre>
* EncryptedBlobClient client = new EncryptedBlobClientBuilder&
* .key&
* .keyResolver&
* .connectionString&
* .containerName&
* .blobName&
* .buildEncryptedBlobClient&
* </pre>
* <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
*
* @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
* @throws IllegalStateException If multiple credentials have been specified.
* @throws IllegalStateException If both {@link
* and {@link
*/
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();
    // Implicit and explicit root-container access are functionally equivalent, but explicit
    // references are easier to read and debug, so normalize an absent container name here.
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }
    // Fall back to the latest known service version when none was configured.
    BlobServiceVersion resolvedVersion = (version == null) ? BlobServiceVersion.getLatest() : version;
    HttpPipeline pipeline = addBlobUserAgentModificationPolicy(getHttpPipeline());
    return new EncryptedBlobAsyncClient(pipeline, endpoint, resolvedVersion, accountName, containerName,
        blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId,
        encryptionVersion, requiresEncryption);
}
/**
 * Rebuilds the given pipeline with a {@link BlobUserAgentModificationPolicy} inserted directly after
 * each {@link UserAgentPolicy}, preserving the order of all existing policies.
 *
 * @param pipeline the pipeline to copy.
 * @return a new pipeline containing the original policies plus the user-agent modification policy.
 */
private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
    List<HttpPipelinePolicy> augmented = new ArrayList<>();
    int policyCount = pipeline.getPolicyCount();
    for (int index = 0; index < policyCount; index++) {
        HttpPipelinePolicy policy = pipeline.getPolicy(index);
        augmented.add(policy);
        // Slot the modification policy immediately after the user-agent policy so it can
        // rewrite the user-agent value that policy produces.
        if (policy instanceof UserAgentPolicy) {
            augmented.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
        }
    }
    return new HttpPipelineBuilder()
        .httpClient(pipeline.getHttpClient())
        .policies(augmented.toArray(new HttpPipelinePolicy[0]))
        .tracer(pipeline.getTracer())
        .build();
}
/**
 * Produces the user-agent string for this client, injecting an
 * {@code azstorage-clientsideencryption/<version>} tag ahead of the blob SDK segment when the base
 * user-agent matches the expected blob SDK pattern. If the pattern does not match, the base
 * user-agent is returned unchanged.
 *
 * @param applicationId optional application id to prepend, may be null.
 * @param userAgentConfiguration configuration consulted when building the base user-agent.
 * @return the (possibly modified) user-agent string.
 */
private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) {
    String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
        userAgentConfiguration);
    // NOTE(review): the pattern is recompiled on every call; consider hoisting to a static final
    // Pattern field alongside USER_AGENT_MODIFICATION_REGEX.
    Matcher matcher = Pattern.compile(USER_AGENT_MODIFICATION_REGEX).matcher(userAgent);
    if (matcher.matches()) {
        String encryptionTag = "azstorage-clientsideencryption/"
            + (encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0");
        String prefix = matcher.group(1) == null ? "" : matcher.group(1);
        String blobSegment = matcher.group(2) == null ? "" : matcher.group(2);
        String suffix = matcher.group(3) == null ? "" : matcher.group(3);
        userAgent = prefix + encryptionTag + " " + blobSegment + suffix;
    }
    return userAgent;
}
/**
 * Sets the encryption key parameters for the client.
 *
 * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content
 * encryption key.
 * @param keyWrapAlgorithm The {@link String} used to wrap the key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
    // Both values are validated together later by checkValidEncryptionParameters().
    this.keyWrapAlgorithm = keyWrapAlgorithm;
    this.keyWrapper = key;
    return this;
}
/**
 * Sets the key resolver used during decryption of existing blobs.
 *
 * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
    this.keyResolver = keyResolver;
    return this;
}
/**
 * Validates that the configured key/key-resolver combination is usable: at least one of the two must
 * be present, and a key without a wrap algorithm is rejected.
 *
 * @throws IllegalArgumentException If both key and key resolver are null, or a key is set without a
 * wrap algorithm.
 */
private void checkValidEncryptionParameters() {
    boolean hasKey = this.keyWrapper != null;
    if (!hasKey && this.keyResolver == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
    }
    if (hasKey && this.keyWrapAlgorithm == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
    }
}
/**
 * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
 * Clears any previously configured token credential or SAS token.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Only one credential style may be active at a time; drop the competing ones.
    this.tokenCredential = null;
    this.sasToken = null;
    this.storageSharedKeyCredential = credential;
    return this;
}
/**
 * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureNamedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Convert to a shared key credential and delegate to that overload.
    StorageSharedKeyCredential converted = StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential);
    return credential(converted);
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // A token credential supersedes any shared key or SAS token previously configured.
    this.storageSharedKeyCredential = null;
    this.sasToken = null;
    this.tokenCredential = credential;
    return this;
}
/**
 * Sets the SAS token used to authorize requests sent to the service. Clears any previously configured
 * shared key or token credential.
 *
 * @param sasToken The SAS token to use for authenticating requests. This string should only be the query
 * parameters (with or without a leading '?') and not a full url.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public EncryptedBlobClientBuilder sasToken(String sasToken) {
    Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
    // Shared key and token credentials are mutually exclusive with a SAS token.
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    this.sasToken = sasToken;
    return this;
}
/**
 * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // NOTE(review): unlike the other credential setters this does not clear competing credentials;
    // presumably conflicts are detected when the pipeline is built — confirm.
    this.azureSasCredential = credential;
    return this;
}
/**
 * Clears every credential used to authorize requests.
 *
 * <p>This is for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
    // Drop all configured credentials so requests go out unauthenticated.
    storageSharedKeyCredential = null;
    tokenCredential = null;
    azureSasCredential = null;
    sasToken = null;
    return this;
}
/**
 * Sets the connection string to connect to the service. Derives the blob endpoint, account name, and
 * credential (account key or SAS token) from the connection string's settings.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid or lacks a blob endpoint.
 */
@Override
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    StorageConnectionString parsed = StorageConnectionString.create(connectionString, LOGGER);
    StorageEndpoint blobEndpoint = parsed.getBlobEndpoint();
    if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(blobEndpoint.getPrimaryUri());
    if (parsed.getAccountName() != null) {
        this.accountName = parsed.getAccountName();
    }
    // Install whichever credential the connection string carries.
    StorageAuthenticationSettings auth = parsed.getStorageAuthSettings();
    if (auth.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        this.credential(new StorageSharedKeyCredential(auth.getAccount().getName(),
            auth.getAccount().getAccessKey()));
    } else if (auth.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        this.sasToken(auth.getSasToken());
    }
    return this;
}
/**
* Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
*
* <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p>
*
* <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
* as the container name. With only one path element, it is impossible to distinguish between a container name and a
* blob in the root container, so it is assumed to be the container name as this is much more common. When working
* with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
* separately using the {@link EncryptedBlobClientBuilder
*
* @param endpoint URL of the service
* @return the updated EncryptedBlobClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
*/
@Override
public EncryptedBlobClientBuilder endpoint(String endpoint) {
    try {
        BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint));
        this.accountName = parts.getAccountName();
        this.endpoint = BuilderHelper.getEndpoint(parts);
        // Only overwrite container/blob names when the URL actually carried them.
        if (parts.getBlobContainerName() != null) {
            this.containerName = parts.getBlobContainerName();
        }
        if (parts.getBlobName() != null) {
            this.blobName = Utility.urlEncode(parts.getBlobName());
        }
        this.snapshot = parts.getSnapshot();
        this.versionId = parts.getVersionId();
        // A SAS embedded in the URL becomes the active credential.
        String parsedSasToken = parts.getCommonSasQueryParameters().encode();
        if (!CoreUtils.isNullOrEmpty(parsedSasToken)) {
            this.sasToken(parsedSasToken);
        }
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
    }
    return this;
}
/**
 * Sets the name of the container that contains the blob.
 *
 * @param containerName Name of the container. If the value is {@code null} or empty, the root container,
 * {@code $root}, will be used when the client is built.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder containerName(String containerName) {
    this.containerName = containerName;
    return this;
}
/**
 * Sets the name of the blob.
 *
 * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
 * version of the blob name.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    // Decode then re-encode so both raw and pre-encoded names end up consistently encoded.
    this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
    return this;
}
/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob, pass {@code null} to interact with the base blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
    this.snapshot = snapshot;
    return this;
}
/**
 * Sets the version identifier of the blob.
 *
 * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob
 * version.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder versionId(String versionId) {
    this.versionId = versionId;
    return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
    // Log (but allow) clearing a previously configured client.
    if (httpClient == null && this.httpClient != null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    // Per-call policies run once per request; all others re-run on each retry attempt.
    List<HttpPipelinePolicy> target =
        pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL
            ? perCallPolicies
            : perRetryPolicies;
    target.add(pipelinePolicy);
    return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
* and from the service.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    // Replaces the defaults installed by the constructor; null is rejected.
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}
/**
 * Gets the default Storage allowlist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    // Delegates to the shared storage builder helper so all storage clients log consistently.
    return BuilderHelper.getDefaultHttpLogOptions();
}
/**
 * Sets the configuration object used to retrieve environment configuration values during building of the
 * client.
 *
 * @param configuration Configuration store used to retrieve environment configurations. May be
 * {@code null}; presumably defaults are resolved at build time — confirm against the pipeline builder.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the request retry options for all the requests made through the client.
 *
 * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
* Consider using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
    // Core RetryOptions variant; mutually exclusive with the RequestRetryOptions overload.
    this.coreRetryOptions = retryOptions;
    return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* The {@link
* {@link
* not ignored when {@code pipeline} is set.
*
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Log (but allow) clearing a previously configured pipeline.
    if (httpPipeline == null && this.httpPipeline != null) {
        LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @see HttpClientOptions
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code clientOptions} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
    // Unlike most setters here, null is rejected — a default ClientOptions is installed at field init.
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}
/**
 * Sets the {@link BlobServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known
 * service version based on the version of the client library being used. If no service version is
 * specified, updating to a newer version of the client library will have the result of potentially moving
 * to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link BlobServiceVersion} of the service to be used when making requests. May be
 * {@code null}, in which case the latest version is chosen at build time.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
    this.version = version;
    return this;
}
/**
 * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the
 * server. Passing {@code null} clears any previously configured key.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CustomerProvidedKey type into the internal CpkInfo representation.
    this.customerProvidedKey = customerProvidedKey == null
        ? null
        : new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    return this;
}
/**
 * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. Passing
 * {@code null} clears any previously configured scope.
 *
 * @param encryptionScope Encryption scope containing the encryption key information.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
    // Wrap the scope name in the internal EncryptionScope representation.
    this.encryptionScope = encryptionScope == null
        ? null
        : new EncryptionScope().setEncryptionScope(encryptionScope);
    return this;
}
/**
 * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
 * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the
 * underlying pipeline should not already be configured for encryption/decryption.
 *
 * <p>If {@code pipeline} is set, all other settings are ignored, aside from endpoint and service
 * version.</p>
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client. To set CPK, please use
 * {@link #customerProvidedKey(CustomerProvidedKey)}.</p>
 *
 * @param blobClient BlobClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
    Objects.requireNonNull(blobClient);
    return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
 * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the
 * {@link HttpPipeline}, {@link URL} and {@link BlobServiceVersion} that are used to interact with the
 * service. Note that the underlying pipeline should not already be configured for encryption/decryption.
 *
 * <p>If {@code pipeline} is set, all other settings are ignored, aside from endpoint and service
 * version.</p>
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client. To set CPK, please use
 * {@link #customerProvidedKey(CustomerProvidedKey)}.</p>
 *
 * @param blobAsyncClient BlobAsyncClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
    Objects.requireNonNull(blobAsyncClient);
    return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
        blobAsyncClient.getServiceVersion());
}
/**
 * Helper method to transform a regular client into an encrypted client: copies the endpoint, service
 * version, and pipeline from the source client into this builder.
 *
 * @param httpPipeline {@link HttpPipeline}
 * @param endpoint The endpoint.
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    this.endpoint(endpoint);
    this.serviceVersion(version);
    return this.pipeline(httpPipeline);
}
/**
 * Sets the requires encryption option.
 *
 * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is
 * downloaded and it is not encrypted.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
    this.requiresEncryption = requiresEncryption;
    return this;
}
} | class EncryptedBlobClientBuilder implements
TokenCredentialTrait<EncryptedBlobClientBuilder>,
ConnectionStringTrait<EncryptedBlobClientBuilder>,
AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
AzureSasCredentialTrait<EncryptedBlobClientBuilder>,
HttpTrait<EncryptedBlobClientBuilder>,
ConfigurationTrait<EncryptedBlobClientBuilder>,
EndpointTrait<EncryptedBlobClientBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
private static final String USER_AGENT_MODIFICATION_REGEX =
"(.*? )?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private String versionId;
private boolean requiresEncryption;
private final EncryptionVersion encryptionVersion;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private AzureSasCredential azureSasCredential;
private String sasToken;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions;
private RetryOptions coreRetryOptions;
private HttpPipeline httpPipeline;
private ClientOptions clientOptions = new ClientOptions();
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
private EncryptionScope encryptionScope;
/**
 * Creates a new instance of the EncryptedBlobClientBuilder using client side encryption v1.
 *
 * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} and specify
 * {@link EncryptionVersion#V2}, as v1 is no longer considered secure.
 */
@Deprecated
public EncryptedBlobClientBuilder() {
logOptions = getDefaultHttpLogOptions();
// v1 remains the no-arg default only for backwards compatibility.
this.encryptionVersion = EncryptionVersion.V1;
// Fix: the two concatenated literals previously produced "highlyrecommended" (missing space).
LOGGER.warning("Client is being configured to use v1 of client side encryption, "
+ "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly "
+ "recommended the version be set to v2 using the constructor");
}
/**
 * Creates a new instance of the EncryptedBlobClientBuilder.
 *
 * @param version The version of the client side encryption protocol to use. It is highly recommended that v2 be
 * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a
 * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
 * @throws NullPointerException If {@code version} is {@code null}.
 */
public EncryptedBlobClientBuilder(EncryptionVersion version) {
Objects.requireNonNull(version);
logOptions = getDefaultHttpLogOptions();
this.encryptionVersion = version;
// Warn callers who explicitly opt in to the insecure v1 protocol.
if (EncryptionVersion.V1.equals(this.encryptionVersion)) {
// Fix: the two concatenated literals previously produced "highlyrecommended" (missing space).
LOGGER.warning("Client is being configured to use v1 of client side encryption, "
+ "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly "
+ "recommended the version be set to v2 using the constructor");
}
}
/**
 * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
 * <pre>
 * EncryptedBlobClient client = new EncryptedBlobClientBuilder&#40;&#41;
 *     .key&#40;key, keyWrapAlgorithm&#41;
 *     .keyResolver&#40;keyResolver&#41;
 *     .connectionString&#40;connectionString&#41;
 *     .containerName&#40;containerName&#41;
 *     .blobName&#40;blobName&#41;
 *     .buildEncryptedBlobClient&#40;&#41;;
 * </pre>
 * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
 *
 * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryOptions(RequestRetryOptions)} have been set.
 */
public EncryptedBlobClient buildEncryptedBlobClient() {
// The sync client is a thin wrapper over an async client built from the same configuration.
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
 * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
 * <pre>
 * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder&#40;&#41;
 *     .key&#40;key, keyWrapAlgorithm&#41;
 *     .keyResolver&#40;keyResolver&#41;
 *     .connectionString&#40;connectionString&#41;
 *     .containerName&#40;containerName&#41;
 *     .blobName&#40;blobName&#41;
 *     .buildEncryptedBlobAsyncClient&#40;&#41;;
 * </pre>
 * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient -->
 *
 * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryOptions(RequestRetryOptions)} have been set.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
// Fails fast when neither a key nor a key resolver is configured, or a key lacks its wrap algorithm.
checkValidEncryptionParameters();
/*
Implicit and explicit root container access are functionally equivalent, but explicit references are easier
to read and debug.
*/
if (CoreUtils.isNullOrEmpty(containerName)) {
containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
}
// Default to the latest known service version when none was requested explicitly.
BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
// The pipeline is augmented so telemetry records that client side encryption is in use.
return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption);
}
/**
 * Returns a copy of the given pipeline in which a {@link BlobUserAgentModificationPolicy} is inserted
 * immediately after every {@link UserAgentPolicy}, so requests advertise this cryptography package's
 * name and version in their user-agent string.
 *
 * @param pipeline the pipeline to copy; it is not modified.
 * @return a new pipeline containing the original policies plus the user-agent modification policy.
 */
private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
    int policyCount = pipeline.getPolicyCount();
    List<HttpPipelinePolicy> augmentedPolicies = new ArrayList<>(policyCount + 1);
    for (int index = 0; index < policyCount; index++) {
        HttpPipelinePolicy policy = pipeline.getPolicy(index);
        augmentedPolicies.add(policy);
        // Slot the modification policy directly behind the stock user-agent policy.
        if (policy instanceof UserAgentPolicy) {
            augmentedPolicies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
        }
    }
    HttpPipelineBuilder rebuilt = new HttpPipelineBuilder()
        .httpClient(pipeline.getHttpClient())
        .policies(augmentedPolicies.toArray(new HttpPipelinePolicy[0]))
        .tracer(pipeline.getTracer());
    return rebuilt.build();
}
/**
 * Builds the blob user-agent string and, when it contains the expected azure-storage-blob token, inserts an
 * {@code azstorage-clientsideencryption/<version>} marker in front of that token so telemetry records which
 * client side encryption protocol (1.0 or 2.0) is in use. The marker is never inserted twice.
 *
 * @param applicationId the application id to include in the user-agent string; may be null.
 * @param userAgentConfiguration configuration consulted when building the user-agent string.
 * @return the (possibly modified) user-agent string.
 */
private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) {
// NOTE(review): the pattern is recompiled on every call; consider a static final Pattern if this
// turns out to be called per-request — confirm call frequency.
Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX);
String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
userAgentConfiguration);
Matcher matcher = pattern.matcher(userAgent);
String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0";
String stringToAppend = "azstorage-clientsideencryption/" + version;
// Only rewrite when the blob token is present and the marker has not already been added.
if (matcher.matches() && !userAgent.contains(stringToAppend)) {
// Groups 1 and 3 (leading/trailing text) are optional and may be null; group 2 is the blob token.
String segment1 = matcher.group(1) == null ? "" : matcher.group(1);
String segment2 = matcher.group(2) == null ? "" : matcher.group(2);
String segment3 = matcher.group(3) == null ? "" : matcher.group(3);
userAgent = segment1 + stringToAppend + " " + segment2 + segment3;
}
return userAgent;
}
/**
 * Sets the encryption key parameters for the client.
 *
 * <p>When a key is provided, the wrap algorithm must be provided with it; this is enforced at build time by
 * {@code checkValidEncryptionParameters()}.</p>
 *
 * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
 * key.
 * @param keyWrapAlgorithm The {@link String} used to wrap the key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
return this;
}
/**
 * Sets the encryption parameters for this client.
 *
 * <p>At least one of a key or a key resolver must be configured before building; this is enforced at build time
 * by {@code checkValidEncryptionParameters()}.</p>
 *
 * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
/**
 * Validates the envelope-encryption configuration at build time.
 *
 * @throws IllegalArgumentException if neither a key nor a key resolver has been set, or if a key was set
 * without its wrap algorithm.
 */
private void checkValidEncryptionParameters() {
// A key (for encrypting) or a resolver (for decrypting) must be present; both may be set.
if (this.keyWrapper == null && this.keyResolver == null) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
}
// A key is unusable without knowing how to wrap the content encryption key.
if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
}
}
/**
* Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link StorageSharedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = null;
this.sasToken = null;
return this;
}
/**
* Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link AzureNamedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = null;
this.sasToken = null;
return this;
}
/**
* Sets the SAS token used to authorize requests sent to the service.
*
* @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
* (with or without a leading '?') and not a full url.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code sasToken} is {@code null}.
*/
public EncryptedBlobClientBuilder sasToken(String sasToken) {
this.sasToken = Objects.requireNonNull(sasToken,
"'sasToken' cannot be null.");
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
* Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
*
* @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
this.azureSasCredential = Objects.requireNonNull(credential,
"'credential' cannot be null.");
return this;
}
/**
 * Clears the credential used to authorize the request.
 *
 * <p>This is for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
// Drop every configured credential so requests are sent unauthenticated.
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
this.azureSasCredential = null;
this.sasToken = null;
return this;
}
/**
 * Sets the connection string to connect to the service.
 *
 * <p>Configures the endpoint, account name and — depending on the connection string's contents — either a
 * shared-key credential or a SAS token.</p>
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
@Override
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, LOGGER);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
// A usable blob endpoint is mandatory; everything else is optional.
if (endpoint == null || endpoint.getPrimaryUri() == null) {
throw LOGGER
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(endpoint.getPrimaryUri());
if (storageConnectionString.getAccountName() != null) {
this.accountName = storageConnectionString.getAccountName();
}
// Pick the authentication mechanism embedded in the connection string, if any.
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
} else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
this.sasToken(authSettings.getSasToken());
}
return this;
}
/**
 * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
 *
 * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p>
 *
 * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
 * as the container name. With only one path element, it is impossible to distinguish between a container name and a
 * blob in the root container, so it is assumed to be the container name as this is much more common. When working
 * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
 * separately using the {@link EncryptedBlobClientBuilder#blobName(String)} method.</p>
 *
 * @param endpoint URL of the service
 * @return the updated EncryptedBlobClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
 */
@Override
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
// Keep any container/blob name that was set explicitly when the URL does not carry one.
this.containerName = parts.getBlobContainerName() == null ? this.containerName
: parts.getBlobContainerName();
this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
this.versionId = parts.getVersionId();
// A SAS token embedded in the URL's query string becomes the credential.
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
}
return this;
}
/**
* Sets the name of the container that contains the blob.
*
* @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
* will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
 * Sets the name of the blob.
 *
 * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version
 * of the blob name.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
// Decode-then-encode normalizes the name so it ends up url-encoded exactly once, whether the
// caller passed a raw or an already-encoded name.
this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
"'blobName' cannot be null.")));
return this;
}
/**
* Sets the snapshot identifier of the blob.
*
* @param snapshot Snapshot identifier for the blob.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
* Sets the version identifier of the blob.
*
* @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder versionId(String versionId) {
this.versionId = versionId;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(pipelinePolicy);
} else {
perRetryPolicies.add(pipelinePolicy);
}
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
* and from the service.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Gets the default Storage allowlist log headers and query parameters.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the request retry options for all the requests made through the client.
*
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions {@link RequestRetryOptions}.
* @return the updated EncryptedBlobClientBuilder object.
*/
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
* Consider using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
this.coreRetryOptions = retryOptions;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* The {@link
* {@link
* not ignored when {@code pipeline} is set.
*
* @return the updated EncryptedBlobClientBuilder object
*/
@Override
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @see HttpClientOptions
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code clientOptions} is {@code null}.
*/
@Override
public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
}
/**
* Sets the {@link BlobServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
 * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}; pass {@code null} to clear a previously set key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CustomerProvidedKey type into the internal CpkInfo representation.
    this.customerProvidedKey = customerProvidedKey == null
        ? null
        : new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    return this;
}
/**
 * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
 *
 * @param encryptionScope Encryption scope containing the encryption key information; pass {@code null} to clear
 * a previously set scope.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
    // Wrap the scope name in the internal EncryptionScope model, or clear it when null.
    this.encryptionScope = encryptionScope == null
        ? null
        : new EncryptionScope().setEncryptionScope(encryptionScope);
    return this;
}
/**
* Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobClient BlobClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
* Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobAsyncClient BlobAsyncClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
Objects.requireNonNull(blobAsyncClient);
return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
blobAsyncClient.getServiceVersion());
}
/**
 * Copies the pipeline, endpoint and service version of an existing blob client into this builder so the
 * resulting encrypted client targets the same blob through the same pipeline.
 *
 * @param httpPipeline {@link HttpPipeline} of the source client.
 * @param endpoint The blob URL of the source client.
 * @param version {@link BlobServiceVersion} used by the source client.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    return this.endpoint(endpoint)
        .serviceVersion(version)
        .pipeline(httpPipeline);
}
/**
 * Sets the requires encryption option.
 *
 * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is
 * downloaded and it is not encrypted.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
this.requiresEncryption = requiresEncryption;
return this;
}
} |
Why create a new set here? We already make a defensive copy of the input when initializing this field. | public Set<String> getExcludedRegions() {
if (this.excludedRegions == null || this.excludedRegions.isEmpty()) {
return new HashSet<>();
}
return new HashSet<>(this.excludedRegions);
} | return new HashSet<>(this.excludedRegions); | public Set<String> getExcludedRegions() {
if (this.excludedRegions == null || this.excludedRegions.isEmpty()) {
return new HashSet<>();
}
return new HashSet<>(this.excludedRegions);
} | class CosmosExcludedRegions {
private final Set<String> excludedRegions;
private final String excludedRegionsAsString;
private static final Pattern SPACE_PATTERN = Pattern.compile(" ");
/**
 * Instantiates {@code CosmosExcludedRegions}.
 *
 * @param excludedRegions the set of regions to exclude.
 * @throws IllegalArgumentException if {@code excludedRegions} is set as null.
 */
public CosmosExcludedRegions(Set<String> excludedRegions) {
checkArgument(excludedRegions != null, "excludedRegions cannot be set to null");
// Defensive copy: later mutation of the caller's set cannot affect this (immutable) instance.
this.excludedRegions = new HashSet<>(excludedRegions);
// Pre-compute the string form once; toString() just returns it.
this.excludedRegionsAsString = stringifyExcludedRegions(this.excludedRegions);
}
/**
 * Returns the excluded regions rendered as a bracketed, comma-separated string (pre-computed at
 * construction), e.g. {@code [eastus,westus2]}.
 *
 * @return the string form of the excluded regions.
 */
@Override
public String toString() {
return this.excludedRegionsAsString;
}
/**
 * Renders the excluded regions as a bracketed, comma-separated list. Each region name is lower-cased using
 * the root locale and stripped of spaces, e.g. {"East US", "West US 2"} becomes "[eastus,westus2]".
 *
 * @param excludedRegions the regions to render; may be {@code null} or empty.
 * @return the bracketed string form; {@code "[]"} when there are no regions.
 */
private static String stringifyExcludedRegions(Set<String> excludedRegions) {
// Initialize once instead of re-assigning "" in the empty branch (dead store in the original).
String joinedRegions = "";
if (excludedRegions != null && !excludedRegions.isEmpty()) {
joinedRegions = excludedRegions
.stream()
.map(r -> SPACE_PATTERN.matcher(r.toLowerCase(Locale.ROOT)).replaceAll(""))
.collect(Collectors.joining(","));
}
return "[" + joinedRegions + "]";
}
} | class CosmosExcludedRegions {
private final Set<String> excludedRegions;
private final String excludedRegionsAsString;
private static final Pattern SPACE_PATTERN = Pattern.compile(" ");
/**
* Instantiates {@code CosmosExcludedRegions}.
*
* @param excludedRegions the set of regions to exclude.
* @throws IllegalArgumentException if {@code excludedRegions} is set as null.
* */
public CosmosExcludedRegions(Set<String> excludedRegions) {
checkArgument(excludedRegions != null, "excludedRegions cannot be set to null");
this.excludedRegions = new HashSet<>(excludedRegions);
this.excludedRegionsAsString = stringifyExcludedRegions(this.excludedRegions);
}
/**
 * Returns the excluded regions rendered as a bracketed, comma-separated string (pre-computed at
 * construction), e.g. {@code [eastus,westus2]}.
 *
 * @return the string form of the excluded regions.
 */
@Override
public String toString() {
return this.excludedRegionsAsString;
}
/**
 * Renders the excluded regions as a bracketed, comma-separated list. Each region name is lower-cased using
 * the root locale and stripped of spaces, e.g. {"East US", "West US 2"} becomes "[eastus,westus2]".
 *
 * @param excludedRegions the regions to render; may be {@code null} or empty.
 * @return the bracketed string form; {@code "[]"} when there are no regions.
 */
private static String stringifyExcludedRegions(Set<String> excludedRegions) {
// Initialize once instead of re-assigning "" in the empty branch (dead store in the original).
String joinedRegions = "";
if (excludedRegions != null && !excludedRegions.isEmpty()) {
joinedRegions = excludedRegions
.stream()
.map(r -> SPACE_PATTERN.matcher(r.toLowerCase(Locale.ROOT)).replaceAll(""))
.collect(Collectors.joining(","));
}
return "[" + joinedRegions + "]";
}
} |
Since we are providing a getter, I thought we might as well return a copy to prevent side effects. Also, customers may want to perform `Set`-based operations such as `add()` or `remove()` on the result. | public Set<String> getExcludedRegions() {
if (this.excludedRegions == null || this.excludedRegions.isEmpty()) {
return new HashSet<>();
}
return new HashSet<>(this.excludedRegions);
} | return new HashSet<>(this.excludedRegions); | public Set<String> getExcludedRegions() {
if (this.excludedRegions == null || this.excludedRegions.isEmpty()) {
return new HashSet<>();
}
return new HashSet<>(this.excludedRegions);
} | class CosmosExcludedRegions {
private final Set<String> excludedRegions;
private final String excludedRegionsAsString;
private static final Pattern SPACE_PATTERN = Pattern.compile(" ");
/**
* Instantiates {@code CosmosExcludedRegions}.
*
* @param excludedRegions the set of regions to exclude.
* @throws IllegalArgumentException if {@code excludedRegions} is set as null.
* */
public CosmosExcludedRegions(Set<String> excludedRegions) {
checkArgument(excludedRegions != null, "excludedRegions cannot be set to null");
this.excludedRegions = new HashSet<>(excludedRegions);
this.excludedRegionsAsString = stringifyExcludedRegions(this.excludedRegions);
}
/**
 * Returns the excluded regions rendered as a bracketed, comma-separated string (pre-computed at
 * construction), e.g. {@code [eastus,westus2]}.
 *
 * @return the string form of the excluded regions.
 */
@Override
public String toString() {
return this.excludedRegionsAsString;
}
/**
 * Renders the excluded regions as {@code "[r1,r2,...]"}; each region is
 * lower-cased and stripped of spaces. A null or empty set yields {@code "[]"}.
 */
private static String stringifyExcludedRegions(Set<String> excludedRegions) {
    if (excludedRegions == null || excludedRegions.isEmpty()) {
        return "[]";
    }
    StringBuilder joined = new StringBuilder("[");
    String separator = "";
    for (String region : excludedRegions) {
        String normalized = SPACE_PATTERN.matcher(region.toLowerCase(Locale.ROOT)).replaceAll("");
        joined.append(separator).append(normalized);
        separator = ",";
    }
    return joined.append("]").toString();
}
} | class CosmosExcludedRegions {
private final Set<String> excludedRegions;
private final String excludedRegionsAsString;
private static final Pattern SPACE_PATTERN = Pattern.compile(" ");
/**
* Instantiates {@code CosmosExcludedRegions}.
*
* @param excludedRegions the set of regions to exclude.
* @throws IllegalArgumentException if {@code excludedRegions} is set as null.
* */
public CosmosExcludedRegions(Set<String> excludedRegions) {
checkArgument(excludedRegions != null, "excludedRegions cannot be set to null");
this.excludedRegions = new HashSet<>(excludedRegions);
this.excludedRegionsAsString = stringifyExcludedRegions(this.excludedRegions);
}
/**
 * Gets the string representation of the excluded regions, in the form
 * {@code "[region1,region2,...]"} with regions lower-cased and spaces removed.
 *
 * @return the string representation of the excluded regions.
 * */
@Override
public String toString() {
    // Precomputed in the constructor, so no formatting work happens here.
    return this.excludedRegionsAsString;
}
/**
 * Renders the excluded regions as {@code "[r1,r2,...]"}; each region is
 * lower-cased and stripped of spaces. A null or empty set yields {@code "[]"}.
 */
private static String stringifyExcludedRegions(Set<String> excludedRegions) {
    if (excludedRegions == null || excludedRegions.isEmpty()) {
        return "[]";
    }
    StringBuilder joined = new StringBuilder("[");
    String separator = "";
    for (String region : excludedRegions) {
        String normalized = SPACE_PATTERN.matcher(region.toLowerCase(Locale.ROOT)).replaceAll("");
        joined.append(separator).append(normalized);
        separator = ",";
    }
    return joined.append("]").toString();
}
} |
I think the tests are failing because we need a fill-in value here for playback. | public void testMetricsBatchQuery() {
MetricsBatchQueryClient metricsBatchQueryClient = clientBuilder
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.buildClient();
String resourceId = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_2", FAKE_RESOURCE_ID);
resourceId = resourceId.substring(resourceId.indexOf("/subscriptions"));
ConfigurationClient configClient = new ConfigurationClientBuilder()
.connectionString(Configuration.getGlobalConfiguration().get("AZURE_APPCONFIG_CONNECTION_STRING"))
.buildClient();
try {
configClient.getConfigurationSetting("foo", "bar");
} catch (HttpResponseException exception) {
}
MetricsQueryOptions options = new MetricsQueryOptions()
.setGranularity(Duration.ofMinutes(15))
.setTop(10)
.setTimeInterval(new QueryTimeInterval(OffsetDateTime.now().minusDays(1), OffsetDateTime.now()));
MetricsBatchResult metricsQueryResults = metricsBatchQueryClient.queryBatchWithResponse(
Arrays.asList(resourceId),
Arrays.asList("HttpIncomingRequestCount"), "microsoft.appconfiguration/configurationstores", options, Context.NONE)
.getValue();
assertEquals(1, metricsQueryResults.getMetricsQueryResults().size());
assertEquals(1, metricsQueryResults.getMetricsQueryResults().get(0).getMetrics().size());
MetricResult metricResult = metricsQueryResults.getMetricsQueryResults().get(0).getMetrics().get(0);
assertEquals("HttpIncomingRequestCount", metricResult.getMetricName());
assertFalse(CoreUtils.isNullOrEmpty(metricResult.getTimeSeries()));
} | .connectionString(Configuration.getGlobalConfiguration().get("AZURE_APPCONFIG_CONNECTION_STRING")) | public void testMetricsBatchQuery() {
MetricsBatchQueryClient metricsBatchQueryClient = clientBuilder
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.buildClient();
String resourceId = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_2", FAKE_RESOURCE_ID);
resourceId = resourceId.substring(resourceId.indexOf("/subscriptions"));
try {
configClient.getConfigurationSetting("foo", "bar");
} catch (HttpResponseException exception) {
}
MetricsQueryOptions options = new MetricsQueryOptions()
.setGranularity(Duration.ofMinutes(15))
.setTop(10)
.setTimeInterval(new QueryTimeInterval(OffsetDateTime.now().minusDays(1), OffsetDateTime.now()));
MetricsBatchResult metricsQueryResults = metricsBatchQueryClient.queryBatchWithResponse(
Arrays.asList(resourceId),
Arrays.asList("HttpIncomingRequestCount"), "microsoft.appconfiguration/configurationstores", options, Context.NONE)
.getValue();
assertEquals(1, metricsQueryResults.getMetricsQueryResults().size());
assertEquals(1, metricsQueryResults.getMetricsQueryResults().get(0).getMetrics().size());
MetricResult metricResult = metricsQueryResults.getMetricsQueryResults().get(0).getMetrics().get(0);
assertEquals("HttpIncomingRequestCount", metricResult.getMetricName());
assertFalse(CoreUtils.isNullOrEmpty(metricResult.getTimeSeries()));
} | class MetricsBatchQueryClientTest extends MetricsBatchQueryTestBase {
// Single @Test only: @Test is not a repeatable annotation, so the duplicated
// annotation on this method did not compile.
@Test
public void testMetricsBatchQueryDifferentResourceTypes() {
    // Log bodies and headers so service-side failures are easy to diagnose.
    MetricsBatchQueryClient metricsBatchQueryClient = clientBuilder
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildClient();
    // Fall back to a fake resource id so the test can also run in playback mode.
    String resourceId1 = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_1", FAKE_RESOURCE_ID);
    String resourceId2 = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_2", FAKE_RESOURCE_ID);
    // The batch API expects ids starting at "/subscriptions"; strip any prefix.
    String updatedResource1 = resourceId1.substring(resourceId1.indexOf("/subscriptions"));
    String updatedResource2 = resourceId2.substring(resourceId2.indexOf("/subscriptions"));
    // Mixing resources of a different type than the queried namespace must be rejected.
    // NOTE(review): the leading space in " Microsoft.Eventhub/Namespaces" looks accidental — confirm it is intended.
    assertThrows(HttpResponseException.class, () -> metricsBatchQueryClient.queryBatch(
        Arrays.asList(updatedResource1, updatedResource2),
        Arrays.asList("Successful Requests"), " Microsoft.Eventhub/Namespaces"));
}
} | class MetricsBatchQueryClientTest extends MetricsBatchQueryTestBase {
// Single @Test only: @Test is not a repeatable annotation, so the duplicated
// annotation on this method did not compile.
@Test
public void testMetricsBatchQueryDifferentResourceTypes() {
    // Log bodies and headers so service-side failures are easy to diagnose.
    MetricsBatchQueryClient metricsBatchQueryClient = clientBuilder
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildClient();
    // Fall back to a fake resource id so the test can also run in playback mode.
    String resourceId1 = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_1", FAKE_RESOURCE_ID);
    String resourceId2 = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_METRICS_RESOURCE_URI_2", FAKE_RESOURCE_ID);
    // The batch API expects ids starting at "/subscriptions"; strip any prefix.
    String updatedResource1 = resourceId1.substring(resourceId1.indexOf("/subscriptions"));
    String updatedResource2 = resourceId2.substring(resourceId2.indexOf("/subscriptions"));
    // Mixing resources of a different type than the queried namespace must be rejected.
    // NOTE(review): the leading space in " Microsoft.Eventhub/Namespaces" looks accidental — confirm it is intended.
    assertThrows(HttpResponseException.class, () -> metricsBatchQueryClient.queryBatch(
        Arrays.asList(updatedResource1, updatedResource2),
        Arrays.asList("Successful Requests"), " Microsoft.Eventhub/Namespaces"));
}
} |
Since you already use encodeResourceId for processing elsewhere in the code, should you verify that this is a valid URL in another way — e.g. with `new URI`? | private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) {
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} else {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
} | if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) { | private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
/** Verifies that listing VMs by scale-set id follows the encoded next link across two pages. */
@Test
public void listByVmssByIdWithNextLinkEncoded() {
    WireMockServer server = startMockServer();
    try {
        ComputeManager manager = mockComputeManager(server);
        String vmssId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7";
        PagedIterable<VirtualMachine> vms = manager.virtualMachines().listByVirtualMachineScaleSetId(vmssId);
        // One VM per page, two pages total.
        Assertions.assertEquals(2, vms.stream().count());
        Assertions.assertTrue(stateHolder.firstPageRequested);
        Assertions.assertTrue(stateHolder.secondPageRequested);
    } finally {
        if (server.isRunning()) {
            server.shutdown();
        }
    }
}
/** Builds a ComputeManager whose ARM endpoint points at the given mock server. */
private ComputeManager mockComputeManager(WireMockServer mockServer) {
    Map<String, String> endpoints = new HashMap<>();
    endpoints.put("resourceManagerEndpointUrl", mockServer.baseUrl());
    endpoints.put("microsoftGraphResourceId", mockServer.baseUrl());
    AzureProfile profile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(endpoints));
    return ComputeManager.authenticate(new HttpPipelineBuilder().build(), profile);
}
/**
 * Builds a failure response for the mock server. Uses a 4xx (client error)
 * status rather than 5xx, since these failures are triggered by malformed
 * requests from the client under test.
 *
 * @param errorMessage the body describing why the request was rejected.
 * @return a 400 response carrying the error message.
 */
private Response failedResponse(String errorMessage) {
    return new Response.Builder()
        .status(400)
        .body(errorMessage)
        .build();
}
/** Builds a minimal VirtualMachineInner payload for the mock list responses. */
private VirtualMachineInner mockVmInner() {
// Fixed sample values only — the test asserts paging behavior, not VM content.
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
// Placeholder credentials — never sent to a real service.
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
/**
 * Serializes the given body to JSON and wraps it in a 200 response.
 * Serialization failures are reported with a 4xx status (consistent with
 * {@code failedResponse}) so the test fails loudly as a mock-side rejection
 * rather than masquerading as a server-side (5xx) problem.
 *
 * @param responseBody the JSON-serializable response payload.
 * @return a 200 response, or a 400 response if serialization fails.
 */
private Response successResponse(Map<String, Object> responseBody) {
    try {
        return new Response.Builder()
            .status(200)
            .body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
            .build();
    } catch (IOException e) {
        return new Response.Builder()
            .status(400)
            .body("Mock server error: " + e.getMessage())
            .build();
    }
}
// Mutable holder shared between the test and the mock transformer; needed because
// the transformer is constructed before the server's base URL (and thus the next
// link) is known.
private static class StateHolder {
// Fully-qualified next-link URL, assigned after the mock server has started.
private String nextLinkUrl;
// Set when the initial list request reaches the mock server.
private boolean firstPageRequested;
// Set when the next-link (second page) request reaches the mock server.
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
/** Verifies that listing VMs by scale-set id follows the encoded next link across two pages. */
@Test
public void listByVmssByIdWithNextLinkEncoded() {
    WireMockServer server = startMockServer();
    try {
        ComputeManager manager = mockComputeManager(server);
        String vmssId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7";
        PagedIterable<VirtualMachine> vms = manager.virtualMachines().listByVirtualMachineScaleSetId(vmssId);
        // One VM per page, two pages total.
        Assertions.assertEquals(2, vms.stream().count());
        Assertions.assertTrue(stateHolder.firstPageRequested);
        Assertions.assertTrue(stateHolder.secondPageRequested);
    } finally {
        if (server.isRunning()) {
            server.shutdown();
        }
    }
}
/** Builds a ComputeManager whose ARM endpoint points at the given mock server. */
private ComputeManager mockComputeManager(WireMockServer mockServer) {
    Map<String, String> endpoints = new HashMap<>();
    endpoints.put("resourceManagerEndpointUrl", mockServer.baseUrl());
    endpoints.put("microsoftGraphResourceId", mockServer.baseUrl());
    AzureProfile profile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(endpoints));
    return ComputeManager.authenticate(new HttpPipelineBuilder().build(), profile);
}
/** Builds a 400 (client error) response carrying the given error message. */
private Response failedResponse(String errorMessage) {
    Response.Builder rejected = new Response.Builder();
    rejected.status(400);
    rejected.body(errorMessage);
    return rejected.build();
}
/** Builds a minimal VirtualMachineInner payload for the mock list responses. */
private VirtualMachineInner mockVmInner() {
// Fixed sample values only — the test asserts paging behavior, not VM content.
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
// Placeholder credentials — never sent to a real service.
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
/** Wraps the payload as a 200 JSON response; serialization failures become a 400 mock error. */
private Response successResponse(Map<String, Object> responseBody) {
    Response.Builder ok = new Response.Builder().status(200);
    try {
        ok.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON));
        return ok.build();
    } catch (IOException e) {
        return new Response.Builder()
            .status(400)
            .body("Mock server error: " + e.getMessage())
            .build();
    }
}
// Mutable holder shared between the test and the mock transformer; needed because
// the transformer is constructed before the server's base URL (and thus the next
// link) is known.
private static class StateHolder {
// Fully-qualified next-link URL, assigned after the mock server has started.
private String nextLinkUrl;
// Set when the initial list request reaches the mock server.
private boolean firstPageRequested;
// Set when the next-link (second page) request reaches the mock server.
private boolean secondPageRequested;
}
} |
use 4xx | private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(500)
.body(errorMessage)
.build();
} | .status(500) | private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(400)
.body(errorMessage)
.build();
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) {
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} else {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(500)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(400)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} |
Did we perform a real authentication here? Should we use a mock credential instead? See https://github.com/Azure/azure-sdk-for-java/blob/7df467719475fae92cb68d84772e8aa9bdd954b0/sdk/dnsresolver/azure-resourcemanager-dnsresolver/src/test/java/com/azure/resourcemanager/dnsresolver/generated/DnsResolversCreateOrUpdateTests.java#L62 | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile); | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
/** Verifies that listing VMs by scale-set id follows the encoded next link across two pages. */
@Test
public void listByVmssByIdWithNextLinkEncoded() {
    WireMockServer server = startMockServer();
    try {
        ComputeManager manager = mockComputeManager(server);
        String vmssId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7";
        PagedIterable<VirtualMachine> vms = manager.virtualMachines().listByVirtualMachineScaleSetId(vmssId);
        // One VM per page, two pages total.
        Assertions.assertEquals(2, vms.stream().count());
        Assertions.assertTrue(stateHolder.firstPageRequested);
        Assertions.assertTrue(stateHolder.secondPageRequested);
    } finally {
        if (server.isRunning()) {
            server.shutdown();
        }
    }
}
/**
 * Starts a WireMock server serving a two-page VM list: the first page carries a
 * nextLink whose query requires URL encoding; the second page is returned only
 * when the next-link request arrives as a syntactically valid (encoded) URL.
 * Validity is checked by parsing with {@code java.net.URI} instead of the
 * previous encodeResourceId round-trip, which conflated resource-id encoding
 * with general URL encoding.
 *
 * @return the started mock server; the caller is responsible for shutting it down.
 */
private WireMockServer startMockServer() {
    ResponseTransformer transformer = new ResponseTransformer() {
        @Override
        public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
            Map<String, Object> responseBody = new HashMap<>();
            Map<String, Object> vm;
            VirtualMachineInner vmInner = mockVmInner();
            try {
                vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
            } catch (IOException e) {
                return failedResponse(e.getMessage());
            }
            vm.put("name", "vmName");
            if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
                stateHolder.firstPageRequested = true;
                responseBody.put("value", Collections.singletonList(vm));
                responseBody.put("nextLink", stateHolder.nextLinkUrl);
                return successResponse(responseBody);
            } else if (request.getUrl().contains(NEXT_LINK_PATH)) {
                stateHolder.secondPageRequested = true;
                // An unencoded filter query (spaces, quotes) makes the URI invalid.
                try {
                    new java.net.URI(request.getUrl());
                    responseBody.put("value", Collections.singletonList(vm));
                    return successResponse(responseBody);
                } catch (java.net.URISyntaxException e) {
                    return failedResponse("Next link not encoded: " + request.getUrl());
                }
            } else {
                return failedResponse("Unexpected request: " + request.getUrl());
            }
        }
        @Override
        public String getName() {
            return "listByVmssId";
        }
    };
    WireMockServer mockServer = new WireMockServer(WireMockConfiguration
        .options()
        .dynamicPort()
        .extensions(transformer)
        .disableRequestJournal());
    mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
    mockServer.start();
    // The next link is only known once the server has a port, hence the late assignment.
    stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
    return mockServer;
}
/**
 * Builds a failure response for the mock server. Uses a 4xx (client error)
 * status rather than 5xx, since these failures are triggered by malformed
 * requests from the client under test.
 *
 * @param errorMessage the body describing why the request was rejected.
 * @return a 400 response carrying the error message.
 */
private Response failedResponse(String errorMessage) {
    return new Response.Builder()
        .status(400)
        .body(errorMessage)
        .build();
}
/** Builds a minimal VirtualMachineInner payload for the mock list responses. */
private VirtualMachineInner mockVmInner() {
// Fixed sample values only — the test asserts paging behavior, not VM content.
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
// Placeholder credentials — never sent to a real service.
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(500)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(400)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(400)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} |
Sure, will do `new URI` test here. | private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) {
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} else {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
} | if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) { | private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(500)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(500)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(400)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(400)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} |
I used an empty pipeline without `AuthenticationPolicy` here, so no token is needed. If use default pipeline, `BearerTokenAuthenticationPolicy` would check if the request http protocol is `https`: https://github.com/Azure/azure-sdk-for-java/blob/d2bfd01a7ca51d79356a1bf0ba10b57c2077e383/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/BearerTokenAuthenticationPolicy.java#L125-L128 Since I'm doing local mock server with `http`, I disable it. | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile); | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) {
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} else {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(500)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(500)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(400)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(400)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} |
Sure. That's fine. | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile); | private ComputeManager mockComputeManager(WireMockServer mockServer) {
Map<String, String> environment = new HashMap<>();
environment.put("resourceManagerEndpointUrl", mockServer.baseUrl());
environment.put("microsoftGraphResourceId", mockServer.baseUrl());
AzureProfile mockProfile = new AzureProfile(UUID.randomUUID().toString(), UUID.randomUUID().toString(), new AzureEnvironment(environment));
ComputeManager computeManager = ComputeManager.authenticate(new HttpPipelineBuilder().build(), mockProfile);
return computeManager;
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
if (ResourceUtils.encodeResourceId(request.getUrl()).equals(request.getUrl())) {
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} else {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(500)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(500)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} | class VirtualMachineMockTests {
private static final String NEXT_LINK_PATH = "/nextLink";
private static final String QUERY = "'virtualMachineScaleSet/id' eq 'id'";
private static final SerializerAdapter SERIALIZER = SerializerFactory.createDefaultManagementSerializerAdapter();
private final StateHolder stateHolder = new StateHolder();
@Test
public void listByVmssByIdWithNextLinkEncoded() {
WireMockServer mockServer = startMockServer();
try {
ComputeManager computeManager = mockComputeManager(mockServer);
PagedIterable<VirtualMachine> virtualMachines = computeManager.virtualMachines().listByVirtualMachineScaleSetId("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/javacsmrg97796/providers/Microsoft.Compute/virtualMachineScaleSets/vmss035803b7");
Assertions.assertEquals(2, virtualMachines.stream().count());
Assertions.assertTrue(stateHolder.firstPageRequested);
Assertions.assertTrue(stateHolder.secondPageRequested);
} finally {
if (mockServer.isRunning()) {
mockServer.shutdown();
}
}
}
private WireMockServer startMockServer() {
ResponseTransformer transformer = new ResponseTransformer() {
@Override
public Response transform(Request request, Response response, FileSource fileSource, Parameters parameters) {
Map<String, Object> responseBody = new HashMap<>();
Map<String, Object> vm;
VirtualMachineInner vmInner = mockVmInner();
try {
vm = SERIALIZER.deserialize(SERIALIZER.serialize(vmInner, SerializerEncoding.JSON), Map.class, SerializerEncoding.JSON);
} catch (IOException e) {
return failedResponse(e.getMessage());
}
vm.put("name", "vmName");
if (request.getUrl().contains("Microsoft.Compute/virtualMachines")) {
stateHolder.firstPageRequested = true;
responseBody.put("value", Collections.singletonList(vm));
responseBody.put("nextLink", stateHolder.nextLinkUrl);
return successResponse(responseBody);
} else if (request.getUrl().contains(NEXT_LINK_PATH)) {
stateHolder.secondPageRequested = true;
try {
new URI(request.getUrl());
responseBody.put("value", Collections.singletonList(vm));
return successResponse(responseBody);
} catch (URISyntaxException e) {
return failedResponse("Next link not encoded: " + request.getUrl());
}
} else {
return failedResponse("Unexpected request: " + request.getUrl());
}
}
@Override
public String getName() {
return "listByVmssId";
}
};
WireMockServer mockServer = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
mockServer.stubFor(WireMock.any(WireMock.anyUrl()).willReturn(WireMock.aResponse()));
mockServer.start();
stateHolder.nextLinkUrl = String.format("%s%s?filter=%s", mockServer.baseUrl(), NEXT_LINK_PATH, QUERY);
return mockServer;
}
private Response failedResponse(String errorMessage) {
return new Response.Builder()
.status(400)
.body(errorMessage)
.build();
}
private VirtualMachineInner mockVmInner() {
return new VirtualMachineInner()
.withLocation("westus")
.withHardwareProfile(new HardwareProfile().withVmSize(VirtualMachineSizeTypes.STANDARD_D1_V2))
.withStorageProfile(
new StorageProfile()
.withImageReference(
new ImageReference()
.withSharedGalleryImageId(
"/SharedGalleries/sharedGalleryName/Images/sharedGalleryImageName/Versions/sharedGalleryImageVersionName"))
.withOsDisk(
new OSDisk()
.withName("myVMosdisk")
.withCaching(CachingTypes.READ_WRITE)
.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE)
.withManagedDisk(
new ManagedDiskParameters()
.withStorageAccountType(StorageAccountTypes.STANDARD_LRS))))
.withOsProfile(
new OSProfile()
.withComputerName("myVM")
.withAdminUsername("{your-username}")
.withAdminPassword("fakeTokenPlaceholder"))
.withNetworkProfile(
new NetworkProfile()
.withNetworkInterfaces(
Arrays
.asList(
new NetworkInterfaceReference()
.withId(
"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}")
.withPrimary(true))));
}
private Response successResponse(Map<String, Object> responseBody) {
try {
return new Response.Builder()
.status(200)
.body(SERIALIZER.serialize(responseBody, SerializerEncoding.JSON))
.build();
} catch (IOException e) {
return new Response.Builder()
.status(400)
.body("Mock server error: " + e.getMessage())
.build();
}
}
private static class StateHolder {
private String nextLinkUrl;
private boolean firstPageRequested;
private boolean secondPageRequested;
}
} |
why do we need `AccessController.doPrivileged`? `AccessController` is to be removed, if this gets in, it had to be temporary solution. | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> { | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} |
Yeah, it's required by CI (Build Analyze phase `DP_DO_INSIDE_DO_PRIVILEGED`). Probably have to go with `Copy` solution. | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> { | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} |
OK, we still want service to fix this issue. We would avoid this as much as possible in long run. (maybe before end of the year) | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> { | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} |
Sure. Is it OK we use reflection here and switch to `Copy` afterwards(if service doesn't fix it then)? `ExpandableStringEnum` uses `ReflectionUtils.getLookupToUse`, which use `AccessController` as well: https://github.com/Azure/azure-sdk-for-java/blob/b3c847da07ca9d1e3c1869d6d259b3fa291adc43/sdk/core/azure-core/src/main/java/com/azure/core/util/ExpandableStringEnum.java#L88 https://github.com/Azure/azure-sdk-for-java/blob/b3c847da07ca9d1e3c1869d6d259b3fa291adc43/sdk/core/azure-core/src/main/java/com/azure/core/implementation/ReflectionUtils.java#L171 We could switch implementation before core does. | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> { | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} |
Ok, if core uses it, I guess we can (at least temporary). | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> { | public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetIdAsync(String vmssId) {
if (CoreUtils.isNullOrEmpty(vmssId)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmssId' is required and cannot be null.")));
}
Method listSinglePageAsync;
try {
listSinglePageAsync = inner().getClass().getDeclaredMethod("listByResourceGroupSinglePageAsync", String.class, String.class, ExpandTypeForListVMs.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
Method listNextSinglePageAsync;
try {
listNextSinglePageAsync = inner().getClass().getDeclaredMethod("listNextSinglePageAsync", String.class, Context.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
java.security.AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
listSinglePageAsync.setAccessible(true);
listNextSinglePageAsync.setAccessible(true);
return null;
});
return new PagedFlux<>(
() -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listSinglePageAsync.invoke(inner(), ResourceUtils.groupFromResourceId(vmssId), String.format("'virtualMachineScaleSet/id' eq '%s'", vmssId), null);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
},
nextLink -> {
try {
return (Mono<PagedResponse<VirtualMachine>>)
listNextSinglePageAsync.invoke(inner(), ResourceUtils.encodeResourceId(nextLink), Context.NONE);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
});
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
    this.powerOffAsync(groupName, name).block();
}

@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
    // Third argument (presumably the skip-shutdown flag) is left null, deferring to the
    // service default. NOTE(review): confirm against the inner client's signature.
    return this.inner().powerOffAsync(groupName, name, null);
}

@Override
public void restart(String groupName, String name) {
    this.inner().restart(groupName, name);
}

@Override
public Mono<Void> restartAsync(String groupName, String name) {
    return this.inner().restartAsync(groupName, name);
}

@Override
public void start(String groupName, String name) {
    this.inner().start(groupName, name);
}

@Override
public Mono<Void> startAsync(String groupName, String name) {
    return this.inner().startAsync(groupName, name);
}

@Override
public void redeploy(String groupName, String name) {
    this.inner().redeploy(groupName, name);
}

@Override
public Mono<Void> redeployAsync(String groupName, String name) {
    return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
    return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}

/**
 * Captures the VM and returns the service's capture result serialized as a JSON string.
 */
@Override
public Mono<String> captureAsync(
    String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
    VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
    parameters.withDestinationContainerName(containerName);
    parameters.withOverwriteVhds(overwriteVhd);
    parameters.withVhdPrefix(vhdPrefix);
    return this
        .inner()
        .captureAsync(groupName, name, parameters)
        .map(
            captureResultInner -> {
                try {
                    // NOTE(review): a fresh ObjectMapper per capture is wasteful; consider a
                    // shared static final instance if captures are frequent.
                    ObjectMapper mapper = new ObjectMapper();
                    return mapper.writeValueAsString(captureResultInner);
                } catch (JsonProcessingException ex) {
                    // Surface the checked serialization failure as an unchecked Reactor error.
                    throw logger.logExceptionAsError(Exceptions.propagate(ex));
                }
            });
}

@Override
public void migrateToManaged(String groupName, String name) {
    // "Migrate to managed" maps to the service's convertToManagedDisks operation.
    this.inner().convertToManagedDisks(groupName, name);
}

@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
    return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
    String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
    return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}

@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
    String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
    return runScriptAsync(groupName, name, "RunPowerShellScript", scriptLines, scriptParameters);
}

@Override
public RunCommandResult runShellScript(
    String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
    return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}

@Override
public Mono<RunCommandResult> runShellScriptAsync(
    String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
    return runScriptAsync(groupName, name, "RunShellScript", scriptLines, scriptParameters);
}

/**
 * Shared implementation for the PowerShell and shell script flavors: packs the script and its
 * parameters into a {@link RunCommandInput} with the given command id and dispatches it through
 * {@code runCommandAsync}.
 *
 * @param groupName the resource group name.
 * @param name the virtual machine name.
 * @param commandId the service command id ("RunPowerShellScript" or "RunShellScript").
 * @param scriptLines the script content, one line per element.
 * @param scriptParameters the script parameters.
 * @return a {@link Mono} emitting the command result.
 */
private Mono<RunCommandResult> runScriptAsync(
    String groupName, String name, String commandId, List<String> scriptLines,
    List<RunCommandInputParameter> scriptParameters) {
    RunCommandInput inputCommand = new RunCommandInput();
    inputCommand.withCommandId(commandId);
    inputCommand.withScript(scriptLines);
    inputCommand.withParameters(scriptParameters);
    return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
    return this.runCommandAsync(groupName, name, inputCommand).block();
}

@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
    // Wrap the inner result into the fluent RunCommandResult type.
    return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}

@Override
public Accepted<Void> beginDeleteById(String id) {
    return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}

/**
 * Starts a VM delete as a long-running operation and returns the activation response.
 */
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
    return AcceptedImpl
        .newAccepted(
            logger,
            this.manager().serviceClient().getHttpPipeline(),
            this.manager().serviceClient().getDefaultPollInterval(),
            // Third argument (force deletion) is left null, i.e. the service default.
            () -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
            Function.identity(),
            Void.class,
            null,
            Context.NONE);
}
// Overloads that expose the service's forceDeletion option explicitly.

@Override
public void deleteById(String id, boolean forceDeletion) {
    deleteByIdAsync(id, forceDeletion).block();
}

@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
    // Split the ARM resource id into (group, name) and delegate.
    return deleteByResourceGroupAsync(
        ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}

@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
    deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}

@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
    return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}

@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
    return beginDeleteByResourceGroup(
        ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}

/**
 * Starts a VM delete (optionally forced) as a long-running operation and returns the
 * activation response.
 */
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
    return AcceptedImpl
        .newAccepted(
            logger,
            this.manager().serviceClient().getHttpPipeline(),
            this.manager().serviceClient().getDefaultPollInterval(),
            () -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
            Function.identity(),
            Void.class,
            null,
            Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
    return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}

// NOTE: a duplicated "@Override" plus an orphaned "@SuppressWarnings" annotation used to sit
// here. "@Override" is not a repeatable annotation, so the duplicate was a compile error;
// both stray annotations have been removed.
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
    return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}

@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
    // Validate eagerly, but surface the failure through the PagedFlux so async callers see
    // it on subscription rather than as a synchronously thrown exception.
    if (vmss == null) {
        return new PagedFlux<>(() -> Mono.error(
            new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
    }

    return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
    return this.vmSizes;
}

/**
 * Creates a new, empty VM model pre-populated with the nested profile objects
 * (storage, OS, hardware, network) that the fluent definition stages mutate.
 */
@Override
protected VirtualMachineImpl wrapModel(String name) {
    VirtualMachineInner inner = new VirtualMachineInner();
    inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
    inner.withOsProfile(new OSProfile());
    inner.withHardwareProfile(new HardwareProfile());
    inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
    return new VirtualMachineImpl(
        name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}

/**
 * Wraps an inner service model into the fluent type; returns null when given null.
 */
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
    if (virtualMachineInner == null) {
        return null;
    }
    return new VirtualMachineImpl(
        virtualMachineInner.name(),
        virtualMachineInner,
        this.manager(),
        this.storageManager,
        this.networkManager,
        this.authorizationManager);
}
} | class VirtualMachinesImpl
extends TopLevelModifiableResourcesImpl<
VirtualMachine, VirtualMachineImpl, VirtualMachineInner, VirtualMachinesClient, ComputeManager>
implements VirtualMachines {
private final StorageManager storageManager;
private final NetworkManager networkManager;
private final AuthorizationManager authorizationManager;
private final VirtualMachineSizesImpl vmSizes;
private final ClientLogger logger = new ClientLogger(VirtualMachinesImpl.class);
public VirtualMachinesImpl(
ComputeManager computeManager,
StorageManager storageManager,
NetworkManager networkManager,
AuthorizationManager authorizationManager) {
super(computeManager.serviceClient().getVirtualMachines(), computeManager);
this.storageManager = storageManager;
this.networkManager = networkManager;
this.authorizationManager = authorizationManager;
this.vmSizes = new VirtualMachineSizesImpl(computeManager.serviceClient().getVirtualMachineSizes());
}
@Override
public VirtualMachine.DefinitionStages.Blank define(String name) {
return wrapModel(name);
}
@Override
public void deallocate(String groupName, String name) {
this.inner().deallocate(groupName, name);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name) {
return this.inner().deallocateAsync(groupName, name);
}
@Override
public void deallocate(String groupName, String name, boolean hibernate) {
this.inner().deallocate(groupName, name, hibernate, Context.NONE);
}
@Override
public Mono<Void> deallocateAsync(String groupName, String name, boolean hibernate) {
return this.inner().deallocateAsync(groupName, name, hibernate);
}
@Override
public void generalize(String groupName, String name) {
this.inner().generalize(groupName, name);
}
@Override
public Mono<Void> generalizeAsync(String groupName, String name) {
return this.inner().generalizeAsync(groupName, name);
}
@Override
public void powerOff(String groupName, String name) {
this.powerOffAsync(groupName, name).block();
}
@Override
public Mono<Void> powerOffAsync(String groupName, String name) {
return this.inner().powerOffAsync(groupName, name, null);
}
@Override
public void restart(String groupName, String name) {
this.inner().restart(groupName, name);
}
@Override
public Mono<Void> restartAsync(String groupName, String name) {
return this.inner().restartAsync(groupName, name);
}
@Override
public void start(String groupName, String name) {
this.inner().start(groupName, name);
}
@Override
public Mono<Void> startAsync(String groupName, String name) {
return this.inner().startAsync(groupName, name);
}
@Override
public void redeploy(String groupName, String name) {
this.inner().redeploy(groupName, name);
}
@Override
public Mono<Void> redeployAsync(String groupName, String name) {
return this.inner().redeployAsync(groupName, name);
}
@Override
public String capture(String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
return this.captureAsync(groupName, name, containerName, vhdPrefix, overwriteVhd).block();
}
@Override
public Mono<String> captureAsync(
String groupName, String name, String containerName, String vhdPrefix, boolean overwriteVhd) {
VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
parameters.withDestinationContainerName(containerName);
parameters.withOverwriteVhds(overwriteVhd);
parameters.withVhdPrefix(vhdPrefix);
return this
.inner()
.captureAsync(groupName, name, parameters)
.map(
captureResultInner -> {
try {
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(captureResultInner);
} catch (JsonProcessingException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(ex));
}
});
}
@Override
public void migrateToManaged(String groupName, String name) {
this.inner().convertToManagedDisks(groupName, name);
}
@Override
public Mono<Void> migrateToManagedAsync(String groupName, String name) {
return this.inner().convertToManagedDisksAsync(groupName, name);
}
@Override
public RunCommandResult runPowerShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runPowerShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runPowerShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunPowerShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runShellScript(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
return this.runShellScriptAsync(groupName, name, scriptLines, scriptParameters).block();
}
@Override
public Mono<RunCommandResult> runShellScriptAsync(
String groupName, String name, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
RunCommandInput inputCommand = new RunCommandInput();
inputCommand.withCommandId("RunShellScript");
inputCommand.withScript(scriptLines);
inputCommand.withParameters(scriptParameters);
return this.runCommandAsync(groupName, name, inputCommand);
}
@Override
public RunCommandResult runCommand(String groupName, String name, RunCommandInput inputCommand) {
return this.runCommandAsync(groupName, name, inputCommand).block();
}
@Override
public Mono<RunCommandResult> runCommandAsync(String groupName, String name, RunCommandInput inputCommand) {
return this.inner().runCommandAsync(groupName, name, inputCommand).map(RunCommandResultImpl::new);
}
@Override
public Accepted<Void> beginDeleteById(String id) {
return beginDeleteByResourceGroup(ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id));
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, null).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public void deleteById(String id, boolean forceDeletion) {
deleteByIdAsync(id, forceDeletion).block();
}
@Override
public Mono<Void> deleteByIdAsync(String id, boolean forceDeletion) {
return deleteByResourceGroupAsync(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public void deleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
deleteByResourceGroupAsync(resourceGroupName, name, forceDeletion).block();
}
@Override
public Mono<Void> deleteByResourceGroupAsync(String resourceGroupName, String name, boolean forceDeletion) {
return this.inner().deleteAsync(resourceGroupName, name, forceDeletion);
}
@Override
public Accepted<Void> beginDeleteById(String id, boolean forceDeletion) {
return beginDeleteByResourceGroup(
ResourceUtils.groupFromResourceId(id), ResourceUtils.nameFromResourceId(id), forceDeletion);
}
@Override
public Accepted<Void> beginDeleteByResourceGroup(String resourceGroupName, String name, boolean forceDeletion) {
return AcceptedImpl
.newAccepted(
logger,
this.manager().serviceClient().getHttpPipeline(),
this.manager().serviceClient().getDefaultPollInterval(),
() -> this.inner().deleteWithResponseAsync(resourceGroupName, name, forceDeletion).block(),
Function.identity(),
Void.class,
null,
Context.NONE);
}
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSetId(String vmssId) {
return new PagedIterable<>(this.listByVirtualMachineScaleSetIdAsync(vmssId));
}
@Override
@SuppressWarnings({"unchecked", "removal"})
@Override
public PagedIterable<VirtualMachine> listByVirtualMachineScaleSet(VirtualMachineScaleSet vmss) {
return new PagedIterable<>(listByVirtualMachineScaleSetAsync(vmss));
}
@Override
public PagedFlux<VirtualMachine> listByVirtualMachineScaleSetAsync(VirtualMachineScaleSet vmss) {
if (vmss == null) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'vmss' is required and cannot be null.")));
}
return listByVirtualMachineScaleSetIdAsync(vmss.id());
}
@Override
public VirtualMachineSizes sizes() {
return this.vmSizes;
}
@Override
protected VirtualMachineImpl wrapModel(String name) {
VirtualMachineInner inner = new VirtualMachineInner();
inner.withStorageProfile(new StorageProfile().withOsDisk(new OSDisk()).withDataDisks(new ArrayList<>()));
inner.withOsProfile(new OSProfile());
inner.withHardwareProfile(new HardwareProfile());
inner.withNetworkProfile(new NetworkProfile().withNetworkInterfaces(new ArrayList<>()));
return new VirtualMachineImpl(
name, inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager);
}
@Override
protected VirtualMachineImpl wrapModel(VirtualMachineInner virtualMachineInner) {
if (virtualMachineInner == null) {
return null;
}
return new VirtualMachineImpl(
virtualMachineInner.name(),
virtualMachineInner,
this.manager(),
this.storageManager,
this.networkManager,
this.authorizationManager);
}
} |
Redundant null check here and in the status part | public String toString() {
String fields;
if (CoreUtils.isNullOrEmpty(this.fields)) {
fields = "ALL_FIELDS";
} else {
fields = this.fields == null ? null : IterableStream.of(this.fields)
.stream()
.map(fieldsEnumValue -> fieldsEnumValue.toString())
.collect(Collectors.joining(","));
}
String status;
if (CoreUtils.isNullOrEmpty(this.status)) {
status = "ALL_STATUS";
} else {
status = this.status == null ? null : IterableStream.of(this.status)
.stream()
.map(statusEnumValue -> statusEnumValue.toString())
.collect(Collectors.joining(","));
}
return String.format("SnapshotSelector(name=%s, status=%s, fields=%s)",
this.name, status, fields);
} | fields = this.fields == null ? null : IterableStream.of(this.fields) | public String toString() {
String fields;
if (CoreUtils.isNullOrEmpty(this.fields)) {
fields = "ALL_FIELDS";
} else {
fields = this.fields == null ? null : IterableStream.of(this.fields)
.stream()
.map(fieldsEnumValue -> fieldsEnumValue.toString())
.collect(Collectors.joining(","));
}
String status;
if (CoreUtils.isNullOrEmpty(this.status)) {
status = "ALL_STATUS";
} else {
status = this.status == null ? null : IterableStream.of(this.status)
.stream()
.map(statusEnumValue -> statusEnumValue.toString())
.collect(Collectors.joining(","));
}
return "SnapshotSelector(name=" + name + ", status=" + status + ", fields=" + fields + ")";
} | class SnapshotSelector {
private String name;
private List<SnapshotStatus> status;
private List<SnapshotFields> fields;
/**
* Gets the snapshot name
*
* @return The snapshot name.
*/
public String getName() {
return name;
}
/**
* Sets the snapshot name.
*
* @param name the snapshot name.
* @return The updated SnapshotSelector object
*/
public SnapshotSelector setName(String name) {
this.name = name;
return this;
}
/**
* Gets the snapshot status
*
* @return The snapshot status.
*/
public List<SnapshotStatus> getSnapshotStatus() {
return status;
}
/**
* Sets the snapshot status. Used to filter returned snapshots by their status properties.
*
* @param status the snapshot status.
* @return The updated SnapshotSelector object
*/
public SnapshotSelector setSnapshotStatus(SnapshotStatus... status) {
this.status = status == null ? null : Arrays.asList(status);
return this;
}
/**
* Gets the fields on {@link ConfigurationSettingsSnapshot} to return from the GET request. If none are set, the
* service returns the snapshot with all of their fields populated.
*
* @return The set of {@link ConfigurationSettingsSnapshot} fields to return for a GET request.
*/
public List<SnapshotFields> getFields() {
return fields;
}
/**
* Sets fields that will be returned in the response corresponding to properties in
* {@link ConfigurationSettingsSnapshot}. If none are set, the service returns snapshot with all of their fields
* populated.
*
* @param fields The fields to select for the query response. If none are set, the service will return the
* snapshot with a default set of properties.
*
* @return The updated SnapshotSelector object.
*/
public SnapshotSelector setFields(SnapshotFields... fields) {
this.fields = fields == null ? null : Arrays.asList(fields);
return this;
}
@Override
} | class SnapshotSelector {
private String name;
private List<SnapshotStatus> status;
private List<SnapshotFields> fields;
/**
* Gets the snapshot name
*
* @return The snapshot name.
*/
public String getName() {
return name;
}
/**
* Sets the snapshot name.
*
* @param name the snapshot name.
* @return The updated SnapshotSelector object
*/
public SnapshotSelector setName(String name) {
this.name = name;
return this;
}
/**
* Gets the snapshot status
*
* @return The snapshot status.
*/
public List<SnapshotStatus> getSnapshotStatus() {
return status;
}
/**
* Sets the snapshot status. Used to filter returned snapshots by their status properties.
*
* @param status the snapshot status.
* @return The updated SnapshotSelector object
*/
public SnapshotSelector setSnapshotStatus(SnapshotStatus... status) {
this.status = status == null ? null : Arrays.asList(status);
return this;
}
/**
* Gets the fields on {@link ConfigurationSettingsSnapshot} to return from the GET request. If none are set, the
* service returns the snapshot with all of their fields populated.
*
* @return The set of {@link ConfigurationSettingsSnapshot} fields to return for a GET request.
*/
public List<SnapshotFields> getFields() {
return fields;
}
/**
* Sets fields that will be returned in the response corresponding to properties in
* {@link ConfigurationSettingsSnapshot}. If none are set, the service returns snapshot with all of their fields
* populated.
*
* @param fields The fields to select for the query response. If none are set, the service will return the
* snapshot with a default set of properties.
*
* @return The updated SnapshotSelector object.
*/
public SnapshotSelector setFields(SnapshotFields... fields) {
this.fields = fields == null ? null : Arrays.asList(fields);
return this;
}
@Override
} |
Remove this line of comment. | protected byte[] engineSign() {
byte[] mHash = getDigestValue();
String encode = Base64.getEncoder().encodeToString(mHash);
if (keyVaultClient != null) {
return keyVaultClient.getSignedWithPrivateKey(this.keyVaultDigestName, encode, keyId);
}
return new byte[0];
} | protected byte[] engineSign() {
byte[] mHash = getDigestValue();
String encode = Base64.getEncoder().encodeToString(mHash);
if (keyVaultClient != null) {
return keyVaultClient.getSignedWithPrivateKey(this.keyVaultDigestName, encode, keyId);
}
return new byte[0];
} | class KeyVaultKeylessRsaSignature extends AbstractKeyVaultKeylessSignature {
private final String keyVaultDigestName;
/**
 * Constructs a new KeyVaultKeylessRsaSignature.
 *
 * @param digestName JCA name of the local digest algorithm; may be {@code null}, in which
 *     case no local {@code MessageDigest} is created.
 * @param keyVaultDigestName the digest name passed to Key Vault for the remote signing
 *     operation.
 * @throws ProviderException if {@code digestName} is non-null but not a supported digest
 *     algorithm.
 */
KeyVaultKeylessRsaSignature(String digestName, String keyVaultDigestName) {
    if (digestName != null) {
        try {
            messageDigest = MessageDigest.getInstance(digestName);
        } catch (NoSuchAlgorithmException e) {
            throw new ProviderException(e);
        }
    }
    this.keyVaultDigestName = keyVaultDigestName;
}
@Override
} | class KeyVaultKeylessRsaSignature extends AbstractKeyVaultKeylessSignature {
private final String keyVaultDigestName;
/**
* Construct a new KeyVaultKeyLessRsaSignature
*/
KeyVaultKeylessRsaSignature(String digestName, String keyVaultDigestName) {
if (digestName != null) {
try {
messageDigest = MessageDigest.getInstance(digestName);
} catch (NoSuchAlgorithmException e) {
throw new ProviderException(e);
}
}
this.keyVaultDigestName = keyVaultDigestName;
}
@Override
} | |
Since we aren't throwing or returning this exception there is no need to wrap this in a `RuntimeException` and we should call `LOGGER.warning("Defaulting to service use for cryptographic operations.", e)` | private Mono<Boolean> isValidKeyLocallyAvailable() {
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
} | LOGGER.logExceptionAsWarning( | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.just(false);
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (Objects.equals(keyCollection, CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.just(true);
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);

// Collection segment parsed from the key identifier (compared against
// CryptographyClientImpl.SECRETS_COLLECTION); null when this client was built directly
// from a JsonWebKey.
private final String keyCollection;
private final HttpPipeline pipeline;
// Set to true once local crypto-client initialization has failed, so subsequent
// operations go straight to the service.
private boolean localOperationNotSupported = false;
private LocalKeyCryptographyClient localKeyCryptographyClient;

final CryptographyClientImpl implClient;
final String keyId;
JsonWebKey key;

/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // unpackAndValidateId both validates the identifier and extracts its collection segment.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // The key material is fetched lazily from the service on first use.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If {@code jsonWebKey} is invalid or is missing its key operations or key
 * type properties.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");

    // Log-and-throw keeps argument validation consistent with the rest of this class
    // (e.g. getKeyWithResponse), which surfaces errors through the ClientLogger.
    if (!jsonWebKey.isValid()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }

    if (jsonWebKey.getKeyOps() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }

    if (jsonWebKey.getKeyType() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }

    // Local-only mode: no service collection, pipeline or implementation client.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
try {
return getKeyWithResponse().flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
try {
return withContext(this::getKeyWithResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
// Service-side fetch of the configured key; only possible when this client is backed by an
// implementation client (i.e. it was built with a pipeline, not from a bare JsonWebKey).
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient != null) {
        return implClient.getKeyAsync(context);
    } else {
        // Clients built directly from a JsonWebKey have no service connection.
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
}

// Retrieves key material stored in the secrets collection (used when the key identifier
// points at a secret rather than a key).
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Propagate the subscriber's Reactor context and convert synchronous failures into an
    // error Mono, matching every other public service method on this client. The previous
    // implementation passed Context.NONE, silently discarding any caller-provided context.
    try {
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Bind the caller's Reactor context, then delegate to the context-aware overload.
    // Synchronous failures are surfaced as an error Mono rather than thrown.
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Encrypt locally when usable key material is cached; otherwise fall back to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
        }
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Prefer the local crypto client when key material is available; delegate to the
    // service otherwise. The ENCRYPT key operation must be permitted for local use.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
        }
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Previously this overload passed a raw null Context (an NPE hazard for any consumer
    // that dereferences it) and skipped the withContext/monoError pattern used by every
    // sibling public method. Now the subscriber's context is propagated and synchronous
    // failures are returned as an error Mono.
    try {
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Capture the subscriber's Reactor context and hand off to the context-aware overload.
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Decrypt locally when valid key material is cached and DECRYPT is permitted;
    // otherwise route the request to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Decrypt operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
        }
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Local decryption is attempted first; the service handles the request when the key
    // material is not available on the client.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Decrypt operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
        }
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
 * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
 * has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    // Bind the caller's Reactor context before delegating to the context-aware overload.
    try {
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    // Sign locally when usable key material is cached and SIGN is permitted; otherwise
    // forward the digest to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Sign operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
        }
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
 * keys. In case of asymmetric keys, the public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    // Capture the subscriber's Reactor context, then delegate to the context-aware overload.
    try {
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    // Verify locally when key material is cached and VERIFY is permitted; otherwise ask the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Verify operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
        }
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    // Bind the caller's Reactor context before handing off to the context-aware overload.
    try {
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    // Wraps the supplied key material locally when a usable key is cached; otherwise the
    // request goes to the service. WRAP_KEY must be among the cached key's operations.
    // Note: the parameter shadows the field, hence the explicit this.key references.
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed error-message typo ("Wrap kKey" -> "Wrap key"), matching the unwrapKey wording.
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    // Propagate the subscriber's Reactor context to the context-aware overload.
    try {
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    // Unwrap locally when key material is cached and UNWRAP_KEY is permitted;
    // otherwise delegate to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
        }
        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    // Bind the caller's Reactor context, then delegate to the context-aware overload.
    try {
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    // Sign the raw data locally when key material is cached and SIGN is permitted;
    // otherwise send it to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Sign operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
        }
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
 * keys and asymmetric keys. In case of asymmetric keys, the public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    // Capture the subscriber's Reactor context before delegating to the context-aware overload.
    try {
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    // Verify against the raw data locally when key material is cached and VERIFY is
    // permitted; otherwise route the request to the service.
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Verify operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
        }
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
// Package-private accessor for the service-backed implementation client.
// Returns null when this client was created from a local JsonWebKey only.
CryptographyClientImpl getImplClient() {
    return implClient;
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Collection segment parsed from the key identifier; null when operating in local-only mode.
private final String keyCollection;
// HTTP pipeline for service calls; null when operating in local-only mode.
private final HttpPipeline pipeline;
// NOTE(review): not read or written anywhere in this chunk -- confirm where this flag is used.
private volatile boolean localOperationNotSupported = false;
// Performs cryptographic operations locally once key material is available.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation; null when operating in local-only mode.
final CryptographyClientImpl implClient;
final String keyId;
// Cached key material; starts null for service-backed clients and may be populated later.
volatile JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // unpackAndValidateId also validates the identifier format before any field is set.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is not fetched eagerly; operations go to the service until it is available.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Validation order is deliberate: null check, overall validity, then required properties.
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no pipeline or service client; all operations run on this machine.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Null when the client was constructed from a local JsonWebKey.
    return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the with-response overload and unwrap the payload.
    try {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Bind the caller's Reactor context, then delegate to the context-aware overload.
    try {
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Local-only clients have no service connection, so this operation cannot be served.
    if (implClient == null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return implClient.getKeyAsync(context);
}
Mono<JsonWebKey> getSecretKey() {
    // Fetches key material via the secrets endpoint and unwraps the response payload.
    // NOTE(review): presumably used when the key identifier points at a secret -- confirm with callers.
    try {
        return withContext(implClient::getSecretKeyAsync).flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Propagate the subscriber's Reactor context and convert synchronous failures into an
    // error Mono, matching every other public service method on this client. The previous
    // implementation passed Context.NONE, silently discarding any caller-provided context.
    try {
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Encrypt locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
        }
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Encrypt locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
        }
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https://learn.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode">Timing vulnerabilities
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Propagate the Reactor subscriber context instead of passing a null Context. Every sibling
    // overload (encrypt, sign, verify, wrapKey, ...) uses the withContext/monoError pattern; a
    // null context risks an NPE wherever the pipeline dereferences it downstream.
    try {
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https://learn.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode">Timing vulnerabilities
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Decrypt locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Decrypt operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
        }
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Decrypt locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Decrypt operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
        }
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    // Sign locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Sign operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
        }
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    // Verify locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Verify operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
        }
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No valid local key material cached: defer to the Key Vault service.
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        // The 'key' parameter (the material to wrap) shadows the client's JsonWebKey field,
        // hence the explicit 'this.key' references below.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed error-message typo: was "Wrap kKey operation".
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    // Unwrap locally when valid key material is cached; otherwise defer to the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
        }
        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    // Sign raw data locally when valid key material is cached; otherwise defer to the service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Sign operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
        }
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    // Verify raw data locally when valid key material is cached; otherwise defer to the service.
    return isValidKeyLocallyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                    "Verify operation is not allowed for key with id: %s", key.getId()))));
            }
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
        }
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
} |
Does the exception that gets thrown here (`e`) include useful information on why we are defaulting to using the service? | private Mono<Boolean> isValidKeyLocallyAvailable() {
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
} | new RuntimeException("Defaulting to service use for cryptographic operations.", e)); | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.just(false);
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (Objects.equals(keyCollection, CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.just(true);
}
} | class CryptographyAsyncClient {
// Logger shared by all instances of this client.
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Collection segment parsed from the key identifier (compared against SECRETS_COLLECTION when
// fetching key material); null for local-only clients built from a JsonWebKey.
private final String keyCollection;
// HTTP pipeline used for service calls; null for local-only clients.
private final HttpPipeline pipeline;
// Set to true once initializing a local cryptography client fails, so subsequent operations
// fall back to the service instead of retrying local initialization.
private boolean localOperationNotSupported = false;
// Performs cryptographic operations locally; lazily initialized once key material is available.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null for local-only clients.
final CryptographyClientImpl implClient;
// Identifier of the Key Vault key this client operates on.
final String keyId;
// Cached key material; null until fetched (service-backed) or provided at construction (local).
JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier and extracts its collection segment (e.g. keys vs. secrets).
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily, on the first cryptographic operation.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Local-only operation requires a structurally valid key with both its permitted
    // operations and key type populated; each check throws with a specific message.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // No service identifier, pipeline, or implementation client: this client is local-only.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    // Eagerly builds the local crypto client; presumably throws for unsupported key types —
    // TODO(review): confirm initializeCryptoClient's failure behavior.
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Null for clients constructed directly from a JsonWebKey (local-only mode).
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Unwrap the Response-returning variant; flatMap handles an absent value as an empty Mono.
    try {
        return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Capture the Reactor subscriber context and delegate to the context-aware overload; any
    // synchronous failure is surfaced as an error Mono rather than a thrown exception.
    try {
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
 * Retrieves the key from the service. Not supported when this client was created from a local
 * {@link JsonWebKey} only (no service-backed implementation client).
 *
 * @param context The context to pass through to the service call.
 * @return A {@link Mono} containing the service response with the requested key.
 * @throws UnsupportedOperationException If the client operates in local-only mode.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient != null) {
        return implClient.getKeyAsync(context);
    } else {
        // Fixed garbled wording in the error message ("when in operating" -> "when operating in").
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
}
// Retrieves the key material through the service's secret-backed API.
// NOTE(review): assumes implClient is non-null; in local-only mode the method reference raises an NPE
// that is routed through monoError — confirm callers never invoke this in local-only mode.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync).flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
/**
 * Encrypts the given plaintext with the configured key and algorithm. See the
 * {@link #encrypt(EncryptParameters)} overload for supported algorithms.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 * @return A {@link Mono} containing the {@link EncryptResult}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Bind the subscriber's reactor context instead of discarding it with Context.NONE, matching the
        // withContext + monoError pattern used by the other public operations (sign, verify, wrapKey, ...).
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
// Encrypts using fully specified parameters (algorithm, IV, AAD, ...), binding the reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the encrypt call: uses the locally cached key when it is usable and permits ENCRYPT,
// otherwise falls back to the service-side implementation client.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
// Parameter-object variant of the local-vs-service encrypt routing above.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
/**
 * Decrypts a single block of encrypted data with the configured key and algorithm. See the
 * {@link #decrypt(DecryptParameters)} overload for supported algorithms.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted.
 * @return A {@link Mono} containing the {@link DecryptResult}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Bind the subscriber's reactor context instead of passing a null Context, matching the
        // withContext + monoError pattern used by the other public operations and avoiding a
        // potential NPE when downstream code dereferences the context.
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
// Decrypts using fully specified parameters (algorithm, IV, AAD, tag, ...), binding the reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the decrypt call: local cached key when usable and permitted, service fallback otherwise.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
// Parameter-object variant of the local-vs-service decrypt routing above.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
 * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
// Creates a signature over a precomputed digest, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the sign call: local cached key when usable and permitted, service fallback otherwise.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
// Verifies a signature against a precomputed digest, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the verify call: local cached key when usable and permitted, service fallback otherwise.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
// Wraps the given symmetric key material, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
 * Wraps the given key material, using the cached key locally when it is available and permits WRAP_KEY,
 * and delegating to the service otherwise. Note: {@code key} is the material to wrap; {@code this.key}
 * is the wrapping key configured on this client.
 *
 * @param algorithm The encryption algorithm to use for wrapping.
 * @param key The key content to be wrapped.
 * @param context The context to pass through to the call.
 * @return A {@link Mono} containing the {@link WrapResult}.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed typo in the error message: "Wrap kKey" -> "Wrap key".
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
// Unwraps previously wrapped key material, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the unwrap call: local cached key when usable and permitted, service fallback otherwise.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
// Creates a signature over raw data (digesting is handled downstream), binding the reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the sign-data call: local cached key when usable and permitted, service fallback otherwise.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
// Verifies a signature against raw data, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Routes the verify-data call: local cached key when usable and permitted, service fallback otherwise.
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
/**
 * Gets the implementation client used for service-side operations.
 *
 * @return The {@link CryptographyClientImpl}, or {@code null} when this client was created from a local
 * {@link JsonWebKey} and operates in local-only mode.
 */
CryptographyClientImpl getImplClient() {
    return implClient;
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final String keyCollection;
private final HttpPipeline pipeline;
private volatile boolean localOperationNotSupported = false;
private LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
volatile JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Presumably validates the identifier format and extracts the collection segment — TODO confirm
    // against unpackAndValidateId.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is not available yet in service mode; it is populated later (see the volatile field).
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Validation order is deliberate: null check first, then overall validity, then the two
    // properties local crypto depends on (key operations and key type).
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no collection, pipeline, or service implementation client.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    // Eagerly build the local crypto client for the supplied key material.
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // May be null when the client was constructed from a local JsonWebKey (local-only mode).
    return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
// Fetches the public key material by delegating to the *WithResponse variant and unwrapping the envelope.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing to the caller.
        return monoError(LOGGER, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
// Retrieves the configured key with its full HTTP response, binding the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        return withContext(context -> getKeyWithResponse(context));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/*
 * Fetches the key from the Key Vault service. A null implClient means this client was
 * created directly from a JsonWebKey and can only perform local operations, so there is
 * no service to fetch from.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient != null) {
        return implClient.getKeyAsync(context);
    } else {
        // Fixed garbled message: was "when in operating local-only mode".
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
}
/*
 * Retrieves the key material stored as a Key Vault secret (used when the client was
 * created against the "secrets" collection) and unwraps the response to the raw
 * JsonWebKey. Assumes implClient is non-null — TODO confirm callers guard local-only mode.
 */
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline.
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Capture the subscriber's Reactor context instead of discarding it with
        // Context.NONE — every other public operation (sign, verify, wrapKey, ...)
        // uses withContext, and the documented sample relies on contextWrite.
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Propagate the subscriber's Reactor context into the package-private overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException ex) {
        // Synchronous failures are reported through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Encrypts plaintext, preferring the cached local key; falls back to the Key Vault
 * service when the key material is not usable locally.
 */
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not retrieved/invalid/unsupported locally: forward to the service.
        if (!available) {
            return implClient.encryptAsync(algorithm, plaintext, context);
        }
        // The cached key's key_ops must permit ENCRYPT before operating locally.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
    });
}
/*
 * EncryptParameters variant of the local-first encrypt path; same fallback behavior
 * as the (algorithm, plaintext) overload.
 */
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the Key Vault service.
        if (!available) {
            return implClient.encryptAsync(encryptParameters, context);
        }
        // Verify key_ops allows ENCRYPT before running the local implementation.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Was: decrypt(algorithm, ciphertext, null). Passing a null Context is
        // inconsistent with every sibling operation and risks NPEs downstream;
        // capture the subscriber's Reactor context like sign/verify/wrapKey do.
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Decrypts ciphertext, preferring the cached local key; falls back to the Key Vault
 * service when the key material is not usable locally.
 */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.decryptAsync(algorithm, ciphertext, context);
        }
        // key_ops must include DECRYPT for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
    });
}
/*
 * DecryptParameters variant of the local-first decrypt path; same fallback behavior
 * as the (algorithm, ciphertext) overload.
 */
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.decryptAsync(decryptParameters, context);
        }
        // key_ops must include DECRYPT for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Signs a precomputed digest, preferring the cached local key; falls back to the
 * Key Vault service when the key material is not usable locally.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.signAsync(algorithm, digest, context);
        }
        // key_ops must include SIGN for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Verifies a signature against a digest, preferring the cached local key; falls back
 * to the Key Vault service when the key material is not usable locally.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.verifyAsync(algorithm, digest, signature, context);
        }
        // key_ops must include VERIFY for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Wraps the given key material, preferring the cached local key; falls back to the
 * Key Vault service when the configured key is not usable locally. Note: the 'key'
 * parameter shadows the cached field, hence the explicit 'this.key' below.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Configured key not usable locally: forward to the service.
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        // key_ops must include WRAP_KEY for the local path.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed typo in user-facing message: was "Wrap kKey operation".
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Unwraps previously wrapped key material, preferring the cached local key; falls
 * back to the Key Vault service when the key material is not usable locally.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        }
        // key_ops must include UNWRAP_KEY for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Signs raw data (the digest is computed by the underlying implementation),
 * preferring the cached local key; falls back to the service otherwise.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.signDataAsync(algorithm, data, context);
        }
        // key_ops must include SIGN for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Hand the subscriber's Reactor context to the Context-accepting overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException ex) {
        // Report synchronous failures through the returned Mono.
        return monoError(LOGGER, ex);
    }
}
/*
 * Verifies a signature against raw data, preferring the cached local key; falls
 * back to the Key Vault service when the key material is not usable locally.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // Key not usable locally: forward to the service.
        if (!available) {
            return implClient.verifyDataAsync(algorithm, data, signature, context);
        }
        // key_ops must include VERIFY for the local path.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
    });
}
} |
The default behavior is to fetch a key from Key Vault and perform crypto operations locally to favor performance. The exception would shed some light into why the client was not able to perform an operation locally and went back to the service. | private Mono<Boolean> isValidKeyLocallyAvailable() {
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.logExceptionAsWarning(
new RuntimeException("Defaulting to service use for cryptographic operations.", e));
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
} | new RuntimeException("Defaulting to service use for cryptographic operations.", e)); | private Mono<Boolean> isValidKeyLocallyAvailable() {
    // Local crypto previously failed to initialize: permanently defer to the service.
    if (localOperationNotSupported) {
        return Mono.just(false);
    }
    boolean keyNotAvailable = (key == null && keyCollection != null);
    if (keyNotAvailable) {
        // Lazily fetch the key material: from the secrets collection for
        // secret-backed keys, otherwise from the keys collection.
        if (Objects.equals(keyCollection, CryptographyClientImpl.SECRETS_COLLECTION)) {
            return getSecretKey().map(jsonWebKey -> {
                key = jsonWebKey;
                if (key.isValid()) {
                    if (localKeyCryptographyClient == null) {
                        try {
                            localKeyCryptographyClient = initializeCryptoClient(key, implClient);
                        } catch (RuntimeException e) {
                            // Remember the failure and log why we fall back to the service.
                            localOperationNotSupported = true;
                            LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
                            return false;
                        }
                    }
                    return true;
                } else {
                    // Invalid key material cannot be used locally.
                    return false;
                }
            });
        } else {
            return getKey().map(keyVaultKey -> {
                key = keyVaultKey.getKey();
                if (key.isValid()) {
                    if (localKeyCryptographyClient == null) {
                        try {
                            localKeyCryptographyClient = initializeCryptoClient(key, implClient);
                        } catch (RuntimeException e) {
                            // Remember the failure and log why we fall back to the service.
                            localOperationNotSupported = true;
                            LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
                            return false;
                        }
                    }
                    return true;
                } else {
                    // Invalid key material cannot be used locally.
                    return false;
                }
            });
        }
    } else {
        // Key already cached (or local-only client): local operations are available.
        return Mono.just(true);
    }
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Collection segment parsed from the key identifier (e.g. the secrets collection); null when the
// client was created from a local JsonWebKey.
private final String keyCollection;
// HTTP pipeline backing service calls; null in local-only mode.
private final HttpPipeline pipeline;
// The three fields below are written inside reactive lambdas that may run on different threads
// (see isValidKeyLocallyAvailable); volatile guarantees cross-thread visibility of the updates.
private volatile boolean localOperationNotSupported = false;
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
// Cached key material, fetched lazily on the first cryptographic operation.
volatile JsonWebKey key;
/**
 * Creates a {@link CryptographyAsyncClient} that performs cryptography operations through the given
 * {@link HttpPipeline pipeline}.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier up front; construction fails before any state is published.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily on the first cryptographic operation.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 *
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If {@code jsonWebKey} is invalid, or its key operations or key type
 * properties are not configured.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");

    // Log before throwing, matching the LOGGER.logExceptionAsError style used throughout this class.
    if (!jsonWebKey.isValid()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }

    if (jsonWebKey.getKeyOps() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }

    if (jsonWebKey.getKeyType() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }

    // Local-only mode: no pipeline or service implementation client is created.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
 * Gets the {@link HttpPipeline} powering this client, or {@code null} in local-only mode.
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response returned by the with-response variant into the bare key.
        return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Gets the public part of the configured key along with the full HTTP response. The get key operation is
 * applicable to all key types and requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> getKeyWithResponse(context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Service-backed retrieval of the configured key; only valid when an implementation client exists.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient == null) {
        // Local-only clients (built from a JsonWebKey) have no service endpoint to call.
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }

    return implClient.getKeyAsync(context);
}
// Retrieves key material stored in the secrets collection as a JsonWebKey.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(context -> implClient.getSecretKeyAsync(context))
            .flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of data is
 * supported; its size depends on the target key and the encryption algorithm. Supported for both
 * symmetric and asymmetric keys (the public portion is used for asymmetric keys). Requires the
 * {@code keys/encrypt} permission for non-local operations.
 *
 * <p>Possible {@link EncryptionAlgorithm} values for asymmetric keys include RSA-based algorithms;
 * symmetric keys support the AES-based algorithms.</p>
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} whose cipher text contains the encrypted
 * content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Propagate the subscriber's Reactor context (e.g. values set via contextWrite) instead of
        // hard-coding Context.NONE, consistent with every other public operation in this class.
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of data is
 * supported; its size depends on the target key and the encryption algorithm. Supported for both
 * symmetric and asymmetric keys (the public portion is used for asymmetric keys). Requires the
 * {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} whose cipher text contains the encrypted
 * content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Encrypts locally when valid key material is available; otherwise delegates to the service.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }

        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
// Parameter-object variant: encrypts locally when possible, otherwise via the service.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }

        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. The size
 * of the block depends on the target key and the algorithm. Supported for both asymmetric and symmetric
 * keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first
 * ensuring the integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Use the withContext/monoError pattern like the sibling overloads instead of passing a
        // null Context, which both drops the subscriber's Reactor context and risks an NPE
        // downstream.
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. The size
 * of the block depends on the target key and the algorithm. Supported for both asymmetric and symmetric
 * keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you
 * not use CBC without first ensuring the integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Decrypts locally when valid key material is available; otherwise delegates to the service.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
// Parameter-object variant: decrypts locally when possible, otherwise via the service.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both
 * asymmetric and symmetric keys. Requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} whose signature contains the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> sign(algorithm, digest, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Signs locally when valid key material is available; otherwise delegates to the service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric and
 * asymmetric keys; for asymmetric keys the public portion of the key is used. Requires the
 * {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> verify(algorithm, digest, signature, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Verifies locally when valid key material is available; otherwise delegates to the service.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping with both
 * symmetric and asymmetric keys. Requires the {@code keys/wrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} whose encrypted key contains the wrapped key
 * result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> wrapKey(algorithm, key, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Wraps the given key material locally when possible, otherwise via the service. The byte[] parameter
// shadows the cached JsonWebKey field, hence the explicit this.key references below.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }

        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Error message typo corrected ("Wrap kKey" -> "Wrap key").
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping it. This is the
 * reverse of the wrap operation and supports both asymmetric and symmetric keys. Requires the
 * {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} whose decrypted key contains the unwrapped
 * key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Unwraps locally when valid key material is available; otherwise delegates to the service.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both
 * asymmetric and symmetric keys. Requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} whose signature contains the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> signData(algorithm, data, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Signs raw data locally when valid key material is available; otherwise delegates to the service.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both
 * symmetric and asymmetric keys; for asymmetric keys the public portion of the key is used. Requires the
 * {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The raw content against which signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the subscriber's Reactor context and delegate to the Context-aware overload.
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Verifies raw data locally when valid key material is available; otherwise delegates to the service.
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(localKeyAvailable -> {
        if (localKeyAvailable) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
            }

            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }

        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
// Exposes the service-backed implementation client; null when this client operates in
// local-only mode (constructed from a JsonWebKey).
CryptographyClientImpl getImplClient() {
    return implClient;
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Collection segment parsed from the key identifier; null when built from a local JsonWebKey.
private final String keyCollection;
// HTTP pipeline backing service calls; null in local-only mode.
private final HttpPipeline pipeline;
// Set once local crypto initialization fails so later operations go straight to the service.
// volatile: written inside reactive lambdas that may run on different threads.
private volatile boolean localOperationNotSupported = false;
// Lazily-initialized local crypto client — NOTE(review): also written from reactive lambdas;
// consider volatile for consistency with the fields above.
private LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
// Cached key material, fetched lazily; volatile for cross-thread visibility.
volatile JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
// Service-backed constructor: cryptographic operations go through the given
// pipeline, with local execution attempted once key material has been fetched.
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier and extracts its collection segment.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily on first use.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
// Local-only constructor: all operations run against the supplied JsonWebKey and
// no HTTP pipeline or service client is created.
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Reject keys that cannot support local operations up front; check order
    // determines which message the caller sees.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    // Local crypto client is initialized eagerly since no service fallback exists.
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
// Returns the pipeline powering this client; null in local-only mode.
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
// Fetches the public part of the configured key; unwraps the Response envelope.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
// Fetches the public part of the configured key, propagating the subscriber's
// Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware key retrieval. Only available for service-backed clients;
// local-only clients (implClient == null) cannot fetch key metadata.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient != null) {
        return implClient.getKeyAsync(context);
    } else {
        // Thrown synchronously by design: this is a misconfiguration, not a service error.
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
}
// Fetches key material stored in the "secrets" collection as a JsonWebKey.
// Used by isValidKeyLocallyAvailable when the id points at a secret-backed key.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
// Public entry point: encrypts a single block of data with the given algorithm.
// Uses the withContext/monoError pattern shared by every other public operation in
// this client. Previously this overload hard-coded Context.NONE, which silently
// dropped any Reactor context supplied via contextWrite(...) — the usage the
// Javadoc sample itself demonstrates — and skipped the monoError guard.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
// Public entry point: encrypts using rich parameters (algorithm, IV, AAD, ...),
// propagating the subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware encrypt: runs locally when usable key material is cached and the
// key permits ENCRYPT; otherwise falls back to the Key Vault service client.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.encryptAsync(algorithm, plaintext, context);
        }
        // Local key exists but its key-ops list does not allow ENCRYPT.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
    });
}
// Context-aware encrypt with rich parameters; same local-vs-service dispatch as
// the (algorithm, plaintext) overload.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.encryptAsync(encryptParameters, context);
        }
        // Local key exists but its key-ops list does not allow ENCRYPT.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
// Public entry point: decrypts a single block of data with the given algorithm.
// Uses the withContext/monoError pattern shared by every other public operation in
// this client. Previously this overload passed a literal null Context downstream,
// which both dropped the subscriber's Reactor context and risked a
// NullPointerException in the service call path.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
// Public entry point: decrypts using rich parameters (algorithm, IV, AAD, tag, ...),
// propagating the subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware decrypt: runs locally when usable key material is cached and the
// key permits DECRYPT; otherwise falls back to the Key Vault service client.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.decryptAsync(algorithm, ciphertext, context);
        }
        // Local key exists but its key-ops list does not allow DECRYPT.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
    });
}
// Context-aware decrypt with rich parameters; same local-vs-service dispatch as
// the (algorithm, ciphertext) overload.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.decryptAsync(decryptParameters, context);
        }
        // Local key exists but its key-ops list does not allow DECRYPT.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
// Public entry point: creates a signature from a precomputed digest, propagating
// the subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> sign(algorithm, digest, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware sign: signs the digest locally when usable key material is cached
// and the key's operations permit SIGN; otherwise delegates to the service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }
            String message = String.format("Sign operation is not allowed for key with id: %s", key.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
// Public entry point: verifies a signature against a precomputed digest,
// propagating the subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> verify(algorithm, digest, signature, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware verify: checks the signature locally when usable key material is
// cached and the key's operations permit VERIFY; otherwise delegates to the service.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }
            String message = String.format("Verify operation is not allowed for key with id: %s", key.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
// Public entry point: wraps the given symmetric key material, propagating the
// subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(context -> wrapKey(algorithm, key, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware wrapKey: wraps locally when usable key material is cached and the
// key permits WRAP_KEY; otherwise falls back to the Key Vault service client.
// Note: the "key" parameter is the material being wrapped; "this.key" is the
// cached JsonWebKey doing the wrapping.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        // Local key exists but its key-ops list does not allow WRAP_KEY.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed error-message typo: "Wrap kKey" -> "Wrap key".
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
// Public entry point: unwraps previously wrapped key material, propagating the
// subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware unwrapKey: unwraps locally when usable key material is cached and
// the key permits UNWRAP_KEY; otherwise falls back to the Key Vault service client.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: delegate to the service.
        if (!available) {
            return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        }
        // Local key exists but its key-ops list does not allow UNWRAP_KEY.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
// Public entry point: creates a signature from raw data (digesting is handled
// downstream), propagating the subscriber's Reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> signData(algorithm, data, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Context-aware signData: signs the raw data locally when usable key material is
// cached and the key's operations permit SIGN; otherwise delegates to the service.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
            }
            String message = String.format("Sign operation is not allowed for key with id: %s", key.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
// Public entry point: verifies a signature against raw data, propagating the
// subscriber's Reactor context to the Context-aware overload.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Verifies a signature over raw data, locally when key material is cached and permitted,
// otherwise via the Key Vault service.
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the verification.
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
} |
nit: no need for the `Mono.defer` here. `Mono.defer` should only be used if the value being set in the `Mono.just` is expensive to compute to defer that computation until the Mono, or Flux, is subscribed. This can just be `Mono.just`. And actually, in this case, this would make performance worse as `Mono.defer` isn't `Fuseable` which Reactor uses to merge multiple reactive operations into a more performant computation. | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.defer(() -> Mono.just(false));
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
} | return Mono.defer(() -> Mono.just(false)); | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.just(false);
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (Objects.equals(keyCollection, CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.just(true);
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final String keyCollection;
private final HttpPipeline pipeline;
private boolean localOperationNotSupported = false;
private LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Derive and validate the collection segment of the key identifier
    // (compared elsewhere against CryptographyClientImpl.SECRETS_COLLECTION).
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily on first use; start with no cached JSON Web Key.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Local-only mode: all operations run against the supplied key, never the service.
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // No service-backed state: pipeline and impl client stay null, which other methods use to
    // detect local-only mode.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    // Eagerly build the local client; the key was validated above.
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
// Exposes the configured pipeline (null in local-only mode).
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Reuse the with-response variant and strip the HTTP envelope from the result.
    try {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // A null impl client means the client was built for local-only use, where there is no
    // service-side key to retrieve.
    if (implClient == null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return implClient.getKeyAsync(context);
}
// Fetches the backing secret and unwraps the REST response into its JSON Web Key value.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync).flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Delegate with the empty Context sentinel rather than capturing the subscriber context.
    return encrypt(algorithm, plaintext, Context.NONE);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Encrypts locally when key material is cached and permitted, otherwise via the service.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the encryption.
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
// Parameter-object variant: encrypts locally when possible, otherwise via the service.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the encryption.
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Pass the empty Context sentinel instead of null, consistent with
    // encrypt(EncryptionAlgorithm, byte[]) which delegates with Context.NONE.
    return decrypt(algorithm, ciphertext, Context.NONE);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Decrypts locally when key material is cached and permitted, otherwise via the service.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the decryption.
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
// Parameter-object variant: decrypts locally when possible, otherwise via the service.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the decryption.
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Signs a digest locally when key material is cached and permitted, otherwise via the service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the signing.
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Verifies a signature over a digest locally when possible, otherwise via the service.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the verification.
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Wraps the given key material locally when this client's key is cached and permitted,
// otherwise via the service. Note the parameter shadows the cached key field, hence `this.key`.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed message typo ("Wrap kKey" -> "Wrap key"); wording now matches the other operations.
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    // Capture the Reactor subscriber context and delegate to the context-aware overload.
    try {
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Unwraps an encrypted key locally when key material is cached and permitted, otherwise via the service.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key; let the service perform the unwrap.
        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
// Defers to the Context-aware overload so the subscriber's reactor context is
// propagated; synchronous setup failures are surfaced as an error Mono.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(ctx -> this.signData(algorithm, data, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for {@code signData}: calls the service when the key material is not usable
 * locally, otherwise validates the key's allowed operations and signs locally.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.signDataAsync(algorithm, data, context);
        }
        // The local key must explicitly list SIGN among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error Mono rather than throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for {@code verifyData}: calls the service when the key material is not usable
 * locally, otherwise validates the key's allowed operations and verifies locally.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.verifyDataAsync(algorithm, data, signature, context);
        }
        // The local key must explicitly list VERIFY among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
    });
}
// Package-private accessor for the service-backed implementation client.
// Null when the client was constructed from a local JsonWebKey (local-only mode).
CryptographyClientImpl getImplClient() {
    return implClient;
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Collection segment parsed from the key identifier; null in local-only mode.
private final String keyCollection;
// HTTP pipeline used for service calls; null in local-only mode.
private final HttpPipeline pipeline;
// Volatile because it may be read/written across reactive threads.
// NOTE(review): not mutated in the visible chunk — confirm where it is set.
private volatile boolean localOperationNotSupported = false;
// Performs cryptography locally once key material is available.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null in local-only mode.
final CryptographyClientImpl implClient;
// Azure Key Vault key identifier (or the JsonWebKey's id in local-only mode).
final String keyId;
// Cached key material; volatile since it may be populated lazily from the service.
volatile JsonWebKey key;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier format and extracts the key collection segment.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily; until then operations go to the service.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Fail fast on malformed or under-specified key material, since every
    // operation on this client runs locally against it.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no key collection, pipeline, or service client.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    // Eager initialization is safe: the key material is already in hand.
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Null when the client was constructed from a local JsonWebKey.
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // FluxUtil::toMono unwraps the Response, completing empty on a null body.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
// Defers to the Context-aware overload, propagating the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Fetches the configured key from the service along with the full HTTP response.
 *
 * @param context Additional context propagated through the HTTP pipeline.
 * @return A {@link Mono} emitting the service {@link Response} holding the {@link KeyVaultKey}.
 * @throws UnsupportedOperationException If the client was created from a local {@link JsonWebKey}
 * and therefore has no service-backed client.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: a client built from a local JsonWebKey has no implClient.
    // Message grammar fixed ("when in operating" -> "when operating in").
    if (implClient == null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
    return implClient.getKeyAsync(context);
}
/**
 * Retrieves the key material stored in the secrets collection (used when the key bytes are
 * managed as a Key Vault secret). Requires a service-backed client.
 */
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(implClient::getSecretKeyAsync)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Use withContext so the subscriber's reactor context reaches the service
        // call, matching every other public operation on this client (previously
        // this passed Context.NONE, silently dropping the caller's context).
        return withContext(context -> encrypt(algorithm, plaintext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for encrypt: calls the service when the key material is not usable locally,
 * otherwise validates the key's allowed operations and encrypts locally.
 */
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.encryptAsync(algorithm, plaintext, context);
        }
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            // NOTE(review): message wording differs from the sibling operations
            // ("... is not allowed for key with id ..."); consider unifying.
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
    });
}
/**
 * Dispatch for the parameterized encrypt overload: calls the service when the key material
 * is not usable locally, otherwise validates permissions and encrypts locally.
 */
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.encryptAsync(encryptParameters, context);
        }
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
            // NOTE(review): message wording differs from the sibling operations
            // ("... is not allowed for key with id ..."); consider unifying.
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Previously forwarded a literal null Context; use withContext like the
        // other public operations so the subscriber's reactor context is
        // propagated and null never reaches the service/local clients.
        return withContext(context -> decrypt(algorithm, ciphertext, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
// Defers to the Context-aware overload, propagating the subscriber's reactor context.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(ctx -> this.decrypt(decryptParameters, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for decrypt: calls the service when the key material is not usable locally,
 * otherwise validates the key's allowed operations and decrypts locally.
 */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.decryptAsync(algorithm, ciphertext, context);
        }
        // The local key must explicitly list DECRYPT among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
    });
}
/**
 * Dispatch for the parameterized decrypt overload: routes to the service when the key is
 * not locally available, otherwise permission-checks and decrypts locally.
 */
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(keyIsLocal -> {
        if (!keyIsLocal) {
            // No usable local key material: defer to the Key Vault service.
            return implClient.decryptAsync(decryptParameters, context);
        }
        if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
        }
        return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Decrypt operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
 * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> sign(algorithm, digest, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for sign: calls the service when the key material is not usable locally,
 * otherwise validates the key's allowed operations and signs the digest locally.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.signAsync(algorithm, digest, context);
        }
        // The local key must explicitly list SIGN among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> verify(algorithm, digest, signature, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for verify: calls the service when the key material is not usable locally,
 * otherwise validates the key's allowed operations and verifies the signature locally.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.verifyAsync(algorithm, digest, signature, context);
        }
        // The local key must explicitly list VERIFY among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> wrapKey(algorithm, key, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for wrapKey: calls the service when the key material is not usable locally,
 * otherwise validates the configured key's allowed operations and wraps locally.
 * Note: the {@code key} parameter is the content to wrap; {@code this.key} is the
 * configured wrapping key.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed typo in the error message ("Wrap kKey" -> "Wrap key").
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Dispatch for unwrapKey: calls the service when the key material is not usable locally,
 * otherwise validates the key's allowed operations and unwraps locally.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        // No usable local key material: defer to the Key Vault service.
        if (!available) {
            return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        }
        // The local key must explicitly list UNWRAP_KEY among its permitted operations.
        if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Propagate the subscriber's reactor context to the dispatching overload.
        return withContext(context -> signData(algorithm, data, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports
 * both symmetric and asymmetric keys; for asymmetric keys the public portion of the key is used to
 * verify the signature. This operation requires the {@code keys/verify} permission for non-local
 * operations.
 *
 * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to
 * verify the signature.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
} |
nit: `Objects.equals` is safer here in case `keyCollection` is null (which I don't think it could be under normal runtime behaviors) | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.defer(() -> Mono.just(false));
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
} | if (keyCollection.equals(CryptographyClientImpl.SECRETS_COLLECTION)) { | private Mono<Boolean> isValidKeyLocallyAvailable() {
if (localOperationNotSupported) {
return Mono.just(false);
}
boolean keyNotAvailable = (key == null && keyCollection != null);
if (keyNotAvailable) {
if (Objects.equals(keyCollection, CryptographyClientImpl.SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = jsonWebKey;
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = keyVaultKey.getKey();
if (key.isValid()) {
if (localKeyCryptographyClient == null) {
try {
localKeyCryptographyClient = initializeCryptoClient(key, implClient);
} catch (RuntimeException e) {
localOperationNotSupported = true;
LOGGER.warning("Defaulting to service use for cryptographic operations.", e);
return false;
}
}
return true;
} else {
return false;
}
});
}
} else {
return Mono.just(true);
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final String keyCollection;
private final HttpPipeline pipeline;
private boolean localOperationNotSupported = false;
private LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
JsonWebKey key;
/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to
 * service requests against the Azure Key Vault service.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    // unpackAndValidateId also validates the identifier's format.
    this.keyCollection = unpackAndValidateId(keyId);
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is fetched lazily on first use.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local
 * cryptography operations only; no HTTP pipeline or service-backed client is created.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 *
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If the key is invalid or is missing its key operations or key
 * type properties.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Validation order is observable: the first failing check determines the exception message.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no key collection, pipeline, or service implementation client.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline, or {@code null} when the client was built from a {@link JsonWebKey} and
 * operates in local-only mode.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types
 * and it requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Delegate to the Response-returning overload and unwrap its value.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Gets the public part of the configured key, along with the full HTTP response. The get key
 * operation is applicable to all key types and it requires the {@code keys/get} permission for
 * non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value contains the requested
 * {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Service-backed retrieval of the configured key.
 *
 * @param context Additional context that is passed through the HTTP pipeline during the service call.
 *
 * @return A {@link Mono} containing a {@link Response} whose value contains the requested key.
 *
 * @throws UnsupportedOperationException If this client was built from a {@link JsonWebKey} and has
 * no service-backed implementation client.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // A service implementation client only exists when this client was created from a key
    // identifier; a client built directly from a JsonWebKey cannot fetch the key remotely.
    // Fix: the original message had transposed words ("when in operating local-only mode").
    if (implClient == null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
    return implClient.getKeyAsync(context);
}
/**
 * Fetches the backing secret (for secret-backed keys) and adapts it into a {@link JsonWebKey}.
 *
 * @return A {@link Mono} containing the {@link JsonWebKey} built from the secret.
 */
Mono<JsonWebKey> getSecretKey() {
    try {
        // Thread the subscriber's reactor context through the service call, then unwrap the value.
        return withContext(context -> implClient.getSecretKeyAsync(context))
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation
 * only supports a single block of data, the size of which is dependent on the target key and the
 * encryption algorithm to be used. The encrypt operation is supported for both symmetric keys and
 * asymmetric keys; in case of asymmetric keys the public portion of the key is used for encryption.
 * This operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use
 * for encrypting the specified {@code plaintext}.</p>
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} whose cipher text contains the
 * encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // No caller-supplied context for this overload; pass the canonical empty context.
    return encrypt(algorithm, plaintext, Context.NONE);
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation
 * only supports a single block of data, the size of which is dependent on the target key and the
 * encryption algorithm to be used. The encrypt operation is supported for both symmetric keys and
 * asymmetric keys; in case of asymmetric keys the public portion of the key is used for encryption.
 * This operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * <p>The {@link EncryptParameters parameters} carry the {@link EncryptionAlgorithm algorithm},
 * plaintext, and any algorithm-specific inputs (e.g. an initialization vector).</p>
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} whose cipher text contains the
 * encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Encrypt operation is missing permission/not supported for key with id: %s",
                    key.getId()))));
        }
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Encrypt operation is missing permission/not supported for key with id: %s",
                    key.getId()))));
        }
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note
 * that only a single block of data may be decrypted, the size of which is dependent on the target
 * key and the algorithm to be used. The decrypt operation is supported for both asymmetric and
 * symmetric keys. This operation requires the {@code keys/decrypt} permission for non-local
 * operations.
 *
 * <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use
 * for decrypting the specified encrypted content.</p>
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first
 * ensuring the integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Fix: pass Context.NONE instead of null, matching the encrypt(EncryptionAlgorithm, byte[])
    // overload so downstream clients never receive a null Context.
    return decrypt(algorithm, ciphertext, Context.NONE);
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note
 * that only a single block of data may be decrypted, the size of which is dependent on the target
 * key and the algorithm to be used. The decrypt operation is supported for both asymmetric and
 * symmetric keys. This operation requires the {@code keys/decrypt} permission for non-local
 * operations.
 *
 * <p>The {@link DecryptParameters parameters} carry the {@link EncryptionAlgorithm algorithm},
 * ciphertext, and any algorithm-specific inputs (e.g. an initialization vector).</p>
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends
 * you not use CBC without first ensuring the integrity of the ciphertext using an HMAC, for
 * example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both
 * asymmetric and symmetric keys. This operation requires the {@code keys/sign} permission for
 * non-local operations.
 *
 * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to
 * create the signature from the digest.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} whose signature property contains the
 * created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> sign(algorithm, digest, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric keys
 * and asymmetric keys; in case of asymmetric keys the public portion of the key is used to verify
 * the signature. This operation requires the {@code keys/verify} permission for non-local
 * operations.
 *
 * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to
 * verify the signature.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which the signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> verify(algorithm, digest, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    // Use the local cryptography client when valid key material is cached; otherwise defer to
    // the Key Vault service.
    return isValidKeyLocallyAvailable().flatMap(useLocalClient -> {
        if (useLocalClient) {
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric
 * key with both symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey}
 * permission for non-local operations.
 *
 * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for
 * wrapping the specified key content.</p>
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} whose encrypted key contains the wrapped
 * key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> wrapKey(algorithm, key, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Wraps the given key material, locally when valid key material is cached, otherwise via the
 * Key Vault service.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped (shadows the {@code key} field; the cached key is
 * accessed as {@code this.key}).
 * @param context Additional context that is passed through the HTTP pipeline during the service call.
 *
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            // No valid local key material; defer to the Key Vault service.
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fix: error message typo "Wrap kKey" -> "Wrap key", matching the unwrap message style.
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key.
 * This operation is the reverse of the wrap operation. The unwrap operation supports asymmetric and
 * symmetric keys to unwrap. This operation requires the {@code keys/unwrapKey} permission for
 * non-local operations.
 *
 * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for
 * unwrapping the specified encrypted key content.</p>
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} whose key contains the unwrapped key
 * result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // withContext captures the subscriber's reactor context and threads it through the call.
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
// Performs the unwrap either locally (when valid key material is cached) or via the service.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Unwrap key operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
    });
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both
 * asymmetric and symmetric keys. This operation requires the {@code keys/sign} permission for non-local
 * operations.
 *
 * @param algorithm The {@link SignatureAlgorithm} to use for signing.
 * @param data The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult#getSignature() signature}
 * contains the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Signs raw data either locally (when valid key material is cached) or via the service.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.signDataAsync(algorithm, data, context);
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both
 * symmetric and asymmetric keys; for asymmetric keys the public portion of the key is used to verify the
 * signature. This operation requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm} that was used for signing.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} whose {@link VerifyResult#isValid() isValid} flag
 * indicates whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Verifies a signature over raw data either locally or via the service.
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.verifyDataAsync(algorithm, data, signature, context);
    });
}
// Exposes the service-backed implementation client (null when the client was built
// from a local JsonWebKey only — see the JsonWebKey constructor).
CryptographyClientImpl getImplClient() {
    return implClient;
}
} | class CryptographyAsyncClient {
// Shared logger for this client type.
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Identifier segment produced by unpackAndValidateId(keyId); null in local-only mode.
private final String keyCollection;
// Pipeline used for HTTP requests; null when operating on a local JsonWebKey only.
private final HttpPipeline pipeline;
// NOTE(review): not written within this chunk; presumably flags that local crypto
// operations are unsupported, forcing service fallback — confirm against full file.
private volatile boolean localOperationNotSupported = false;
// Client that performs cryptography operations locally with the cached key material.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null in local-only mode.
final CryptographyClientImpl implClient;
// Full Azure Key Vault key identifier this client operates on.
final String keyId;
// Cached key material; null initially for service-backed clients and may be populated later.
volatile JsonWebKey key;
/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Unpacks and validates the key identifier; presumably throws on a malformed id — TODO confirm.
    this.keyCollection = unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Key material is not available yet for service-backed clients.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations, with no service connection.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 *
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If {@code jsonWebKey} is invalid or is missing its key operations or key
 * type properties.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Validation order matters: it determines which exception message the caller sees.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no collection, pipeline, or service implementation client.
    this.keyCollection = null;
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.implClient = null;
    this.localKeyCryptographyClient = initializeCryptoClient(key, null);
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * <p>May be {@code null} when this client was constructed from a local {@link JsonWebKey} only.</p>
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and it
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Delegates to the with-response variant and unwraps the value from the Response.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Gets the public part of the configured key along with the full HTTP response. The get key operation is
 * applicable to all key types and it requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the
 * requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Fetches the key from the service; unsupported when running in local-only mode.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (implClient == null) {
        // Local-only clients have no service connection to fetch the key from.
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return implClient.getKeyAsync(context);
}
// Retrieves key material stored as a secret and unwraps it from the Response.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> implClient.getSecretKeyAsync(ctx)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only
 * supports a single block of data, the size of which is dependent on the target key and the encryption
 * algorithm to be used. The encrypt operation is supported for both symmetric and asymmetric keys; for
 * asymmetric keys the public portion of the key is used for encryption. This operation requires the
 * {@code keys/encrypt} permission for non-local operations.
 *
 * @param algorithm The {@link EncryptionAlgorithm} to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} whose {@link EncryptResult#getCipherText()
 * cipher text} contains the encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Delegates with an empty context rather than a null one.
    return encrypt(algorithm, plaintext, Context.NONE);
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the given parameters. Note that the
 * encrypt operation only supports a single block of data, the size of which is dependent on the target key
 * and the encryption algorithm to be used. The encrypt operation is supported for both symmetric and
 * asymmetric keys; for asymmetric keys the public portion of the key is used for encryption. This operation
 * requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} whose {@link EncryptResult#getCipherText()
 * cipher text} contains the encrypted content.
 *
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Encrypts plaintext either locally (when valid key material is cached) or via the service.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Encrypt operation is missing permission/not supported for key with id: %s",
                    key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.encryptAsync(algorithm, plaintext, context);
    });
}
// Encrypts with explicit parameters either locally or via the service.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Encrypt operation is missing permission/not supported for key with id: %s",
                    key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.encryptAsync(encryptParameters, context);
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only
 * a single block of data may be decrypted, the size of which is dependent on the target key and the algorithm
 * to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation
 * requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The {@link EncryptionAlgorithm} to be used for decryption.
 * @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring
 * the integrity of the ciphertext using an HMAC, for example. See the Microsoft documentation on timing
 * vulnerabilities with CBC-mode symmetric decryption using padding for more information.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Pass Context.NONE instead of null for consistency with encrypt(EncryptionAlgorithm, byte[])
    // and to avoid handing a null context to downstream clients.
    return decrypt(algorithm, ciphertext, Context.NONE);
}
/**
 * Decrypts a single block of encrypted data using the configured key and the given parameters. Note that only
 * a single block of data may be decrypted, the size of which is dependent on the target key and the algorithm
 * to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation
 * requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not
 * use CBC without first ensuring the integrity of the ciphertext using an HMAC, for example. See the
 * Microsoft documentation on timing vulnerabilities with CBC-mode symmetric decryption using padding for more
 * information.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Decrypts ciphertext either locally (when valid key material is cached) or via the service.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.decryptAsync(algorithm, ciphertext, context);
    });
}
// Decrypts with explicit parameters either locally or via the service.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.decryptAsync(decryptParameters, context);
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
 * symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm} to use for signing.
 * @param digest The digest from which the signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult#getSignature() signature}
 * contains the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Signs a precomputed digest either locally (when valid key material is cached) or via the service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.signAsync(algorithm, digest, context);
    });
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric and asymmetric
 * keys; for asymmetric keys the public portion of the key is used to verify the signature. This operation
 * requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm} that was used for signing.
 * @param digest The digest from which the signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} whose {@link VerifyResult#isValid() isValid} flag
 * indicates whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Verifies a signature over a digest either locally or via the service.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    return isValidKeyLocallyAvailable().flatMap(useLocalKey -> {
        if (useLocalKey) {
            // A valid key is cached locally; enforce its allowed key operations before using it.
            if (checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, key, context);
            }
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                String.format("Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key material: delegate to the Key Vault service.
        return implClient.verifyAsync(algorithm, digest, signature, context);
    });
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with
 * both symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for
 * non-local operations.
 *
 * @param algorithm The {@link KeyWrapAlgorithm} to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult#getEncryptedKey() encrypted
 * key} contains the wrapped key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for the wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
// Wraps key material either locally (when valid key material is cached) or via the service.
// NOTE: the 'key' parameter shadows the cached JsonWebKey field, hence the 'this.key' references.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    return isValidKeyLocallyAvailable().flatMap(available -> {
        if (!available) {
            // No usable local key material: delegate to the Key Vault service.
            return implClient.wrapKeyAsync(algorithm, key, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            // Fixed error-message typo: was "Wrap kKey operation ...".
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, this.key, context);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This
 * operation is the reverse of the wrap operation and supports both asymmetric and symmetric keys. It requires
 * the {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The {@link KeyWrapAlgorithm} to use for unwrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult#getKey() key} contains
 * the unwrapped key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for the unwrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Propagate the Reactor subscriber context into the overload that performs the work.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures as an error signal instead of throwing from assembly.
        return monoError(LOGGER, ex);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
return isValidKeyLocallyAvailable().flatMap(available -> {
if (!available) {
return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
}
if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
"Unwrap key operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, key, context);
});
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> signData(algorithm, data, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
return isValidKeyLocallyAvailable().flatMap(available -> {
if (!available) {
return implClient.signDataAsync(algorithm, data, context);
}
if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
"Sign operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.signDataAsync(algorithm, data, key, context);
});
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> verifyData(algorithm, data, signature, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
return isValidKeyLocallyAvailable().flatMap(available -> {
if (!available) {
return implClient.verifyDataAsync(algorithm, data, signature, context);
}
if (!checkKeyPermissions(key.getKeyOps(), KeyOperation.VERIFY)) {
return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
"Verify operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, key, context);
});
}
} |
sanitizers are also needed in playback mode. | protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
} | if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { | protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(30));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException e) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException ignored) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} |
This is a path that only is reached when the test mode is non-playback | protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
} | if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { | protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(30));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException e) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
/**
 * Generates an {@link HttpClient} with a proxy.
 * <p>
 * When no explicit proxy is supplied, the JVM's default {@link ProxySelector} is consulted first,
 * then the {@code https.proxy*} / {@code http.proxy*} system properties.
 *
 * @param clientBuilder The HttpClient builder.
 * @param proxyOptions The proxy.
 * @return An HttpClient with a proxy.
 */
@Override
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
    // NOTE: the Javadoc above previously sat AFTER @Override, detaching it from the declaration.
    if (clientBuilder == null) {
        clientBuilder = new NettyAsyncHttpClientBuilder();
    }
    if (proxyOptions != null) {
        clientBuilder.proxy(proxyOptions);
    } else {
        try {
            // Ask the JVM's default ProxySelector (honoring system proxy settings) first.
            System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
            List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
            if (!proxies.isEmpty()) {
                for (Proxy proxy : proxies) {
                    if (proxy.address() instanceof InetSocketAddress) {
                        String host = ((InetSocketAddress) proxy.address()).getHostName();
                        int port = ((InetSocketAddress) proxy.address()).getPort();
                        switch (proxy.type()) {
                            case HTTP:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
                            case SOCKS:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
                            default:
                                // DIRECT (no proxy): keep scanning for a usable proxy entry.
                                break;
                        }
                    }
                }
            }
            // Fall back to explicit https/http proxy system properties (https takes precedence).
            String host = null;
            int port = 0;
            if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
                host = System.getProperty(HTTPS_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
            } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
                host = System.getProperty(HTTP_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
            }
            if (host != null) {
                clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
            }
        } catch (URISyntaxException ignored) {
            // Malformed endpoint URI: proceed without configuring a proxy.
        }
    }
    return clientBuilder.build();
}
@Override
protected void afterTest() {
    // Tests skipped in playback never created live resources, so only clean up when the test actually ran.
    if (!isSkipInPlayback) {
        cleanUpResources();
    }
}
/**
 * Sets sdk context when running the tests
 *
 * @param internalContext the internal runtime context
 * @param objects the manager classes to change internal context
 * @param <T> the type of internal context
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> void setInternalContext(T internalContext, Object... objects) {
    try {
        for (Object obj : objects) {
            // First pass: patch the "internalContext" held by each object's inherited "resourceManager" field.
            for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
                if (field.getName().equals("resourceManager")) {
                    setAccessible(field);
                    Field context = field.get(obj).getClass().getDeclaredField("internalContext");
                    setAccessible(context);
                    context.set(field.get(obj), internalContext);
                }
            }
            // Second pass: patch the object's own "internalContext" and recurse into any nested *Manager fields.
            for (Field field : obj.getClass().getDeclaredFields()) {
                if (field.getName().equals("internalContext")) {
                    setAccessible(field);
                    field.set(obj, internalContext);
                } else if (field.getName().contains("Manager")) {
                    setAccessible(field);
                    setInternalContext(internalContext, field.get(obj));
                }
            }
        }
    } catch (IllegalAccessException | NoSuchFieldException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
/**
 * Makes the given reflective object accessible.
 *
 * @param accessibleObject the field/method/constructor to open up.
 */
private void setAccessible(final AccessibleObject accessibleObject) {
    // The previous Runnable indirection ran synchronously and added nothing; call directly.
    accessibleObject.setAccessible(true);
}
/**
 * Builds the manager with provided http pipeline and profile in general manner.
 *
 * @param manager the class of the manager
 * @param httpPipeline the http pipeline
 * @param profile the azure profile
 * @param <T> the type of the manager
 * @return the manager instance
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
    try {
        // Managers hide their (pipeline, profile) constructor, so open it up reflectively.
        Constructor<T> ctor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
        setAccessible(ctor);
        T instance = ctor.newInstance(httpPipeline, profile);
        return instance;
    } catch (ReflectiveOperationException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
/**
 * Builds an HttpPipeline.
 * <p>
 * Implemented by concrete test base classes; called before clients are initialized.
 *
 * @param credential The credentials to use in the pipeline.
 * @param profile The AzureProfile to use in the pipeline.
 * @param httpLogOptions The HTTP logging options to use in the pipeline.
 * @param policies Additional policies to use in the pipeline.
 * @param httpClient The HttpClient to use in the pipeline.
 * @return A new constructed HttpPipeline.
 */
protected abstract HttpPipeline buildHttpPipeline(
    TokenCredential credential,
    AzureProfile profile,
    HttpLogOptions httpLogOptions,
    List<HttpPipelinePolicy> policies,
    HttpClient httpClient);
/**
 * Initializes service clients used in testing.
 * <p>
 * Implemented by concrete test base classes once the pipeline has been built.
 *
 * @param httpPipeline The HttpPipeline to use in the clients.
 * @param profile The AzureProfile to use in the clients.
 */
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
 * Cleans up resources.
 * <p>
 * Invoked from {@code afterTest()} unless the test was skipped in playback.
 */
protected abstract void cleanUpResources();
// Registers the default record/playback sanitizers (subscription IDs in URLs, Retry-After header,
// and secret-bearing JSON body fields) plus any sanitizers added by subclasses, with the proxy.
private void addSanitizers() {
    List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
        new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
        new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
    ));
    // Subclass-registered sanitizers run in addition to the defaults.
    sanitizers.addAll(this.sanitizers);
    interceptorManager.addSanitizers(sanitizers);
}
/**
 * Adds test proxy sanitizers.
 * <p>
 * Recommend to call this API in subclass constructor.
 *
 * @param sanitizers the test proxy sanitizers.
 */
protected void addSanitizers(TestProxySanitizer... sanitizers) {
    for (TestProxySanitizer sanitizer : sanitizers) {
        this.sanitizers.add(sanitizer);
    }
}
// JUnit 5 interceptor that bounds test execution time, but only in playback mode.
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
    // Timeout resolved once at construction time.
    private final Duration duration;

    private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
        Objects.requireNonNull(timeoutSupplier);
        this.duration = timeoutSupplier.get();
    }

    @Override
    public void interceptTestMethod(Invocation<Void> invocation,
                                    ReflectiveInvocationContext<Method> invocationContext,
                                    ExtensionContext extensionContext) throws Throwable {
        // Playback runs against recorded traffic, so a hang indicates a bug; enforce the timeout there.
        // Live/record runs may legitimately be slow, so they proceed unbounded.
        if (isPlaybackMode()) {
            Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
        } else {
            invocation.proceed();
        }
    }
}
} |
Since this now includes gateway mode as well, should we call this `simpleClientBuildersWithoutRetryOnThrottledRequests` ? | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
// Pauses after writes so replicas can catch up when the client consistency is weak.
// The EVENTUAL/CONSISTENT_PREFIX cases intentionally fall through into the no-op cases
// after sleeping - hence the "fallthrough" suppression.
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
    switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
        case EVENTUAL:
        case CONSISTENT_PREFIX:
            logger.info(" additional wait in EVENTUAL mode so the replica catch up");
            try {
                TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
            } catch (Exception e) {
                logger.error("unexpected failure", e);
            }
        case SESSION:
        case BOUNDED_STALENESS:
        case STRONG:
        default:
            break;
    }
}
// Creates a container with manually provisioned throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options, int throughput) {
    ThroughputProperties manualThroughput = ThroughputProperties.createManualThroughput(throughput);
    database.createContainer(cosmosContainerProperties, manualThroughput, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}
// Creates a container with default (service-assigned) throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
    CosmosContainerRequestOptions options) {
    database.createContainer(cosmosContainerProperties, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}
/**
 * Creates a partitioned container definition (PK path {@code /pk}) carrying four composite indexes:
 * a simple two-column index, a four-column ("max columns") index, an index over primitive-valued
 * fields, and an index over string fields of varying length (default sort order).
 * <p>
 * NOTE(review): despite the method name, no spatial indexes are configured here - confirm whether
 * spatial indexes were intended or whether the name should drop "AndSpatial".
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";

    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();

    // 1) Simple two-column composite index.
    List<CompositePath> simpleIndex = new ArrayList<>();
    simpleIndex.add(compositePathOf(NUMBER_FIELD, CompositePathSortOrder.ASCENDING));
    simpleIndex.add(compositePathOf(STRING_FIELD, CompositePathSortOrder.DESCENDING));

    // 2) Maximum-column composite index (four columns).
    List<CompositePath> maxColumnsIndex = new ArrayList<>();
    maxColumnsIndex.add(compositePathOf(NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    maxColumnsIndex.add(compositePathOf(STRING_FIELD, CompositePathSortOrder.ASCENDING));
    maxColumnsIndex.add(compositePathOf(NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING));
    maxColumnsIndex.add(compositePathOf(STRING_FIELD_2, CompositePathSortOrder.ASCENDING));

    // 3) Composite index over primitive-valued fields (bool/null included).
    List<CompositePath> primitiveValuesIndex = new ArrayList<>();
    primitiveValuesIndex.add(compositePathOf(NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    primitiveValuesIndex.add(compositePathOf(STRING_FIELD, CompositePathSortOrder.ASCENDING));
    primitiveValuesIndex.add(compositePathOf(BOOL_FIELD, CompositePathSortOrder.DESCENDING));
    primitiveValuesIndex.add(compositePathOf(NULL_FIELD, CompositePathSortOrder.ASCENDING));

    // 4) Composite index over string fields of varying length (default sort order).
    List<CompositePath> longStringsIndex = new ArrayList<>();
    longStringsIndex.add(compositePathOf(STRING_FIELD, null));
    longStringsIndex.add(compositePathOf(SHORT_STRING_FIELD, null));
    longStringsIndex.add(compositePathOf(MEDIUM_STRING_FIELD, null));
    longStringsIndex.add(compositePathOf(LONG_STRING_FIELD, null));

    compositeIndexes.add(simpleIndex);
    compositeIndexes.add(maxColumnsIndex);
    compositeIndexes.add(primitiveValuesIndex);
    compositeIndexes.add(longStringsIndex);

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

// Builds a CompositePath for "/<field>"; a null order leaves the SDK's default sort order untouched.
private static CompositePath compositePathOf(String field, CompositePathSortOrder order) {
    CompositePath path = new CompositePath();
    path.setPath("/" + field);
    if (order != null) {
        path.setOrder(order);
    }
    return path;
}
// Creates the container inside the named database and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase targetDatabase = client.getDatabase(dbId);
    targetDatabase.createContainer(collectionDefinition).block();
    return targetDatabase.getContainer(collectionDefinition.getId());
}
// Deletes the named container from the named database, blocking until completion.
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    client.getDatabase(dbId).getContainer(collectionId).delete().block();
}
// Creates the item synchronously and returns the server-side document properties.
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    return BridgeInternal.getProperties(cosmosContainer.createItem(item).block());
}
// Prepares one cold create Mono per document and merges them with the given concurrency level.
// Nothing executes until the returned Flux is subscribed; completion order is not guaranteed.
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
    List<T> documentDefinitionList,
    int concurrencyLevel) {
    List<Mono<CosmosItemResponse<T>>> result =
        new ArrayList<>(documentDefinitionList.size());
    for (T docDef : documentDefinitionList) {
        result.add(cosmosContainer.createItem(docDef));
    }
    return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}
// Inserts all documents with the default concurrency level and blocks for the created items.
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
// Same as bulkInsertBlocking but discards the responses; blocks until all inserts complete.
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .then()
        .block();
}
// Creates the user in the named database and returns a handle resolved by the created id.
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    String createdUserId = database.createUser(userSettings).block().getProperties().getId();
    return database.getUser(createdUserId);
}
// Idempotent create: removes any existing user with the same id before creating.
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
    deleteUserIfExists(client, databaseId, user.getId());
    return createUser(client, databaseId, user);
}
// Idempotent create: removes any existing container with the same id before creating.
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
    deleteCollectionIfExists(client, databaseId, collection.getId());
    return createCollection(client.getDatabase(databaseId), collection, options);
}
// Standard container definition plus an all-versions-and-deletes change feed policy (5 min retention).
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
    CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
    cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
    return cosmosContainerProperties;
}
// Container definition with a random id and the default "/mypk" partition key path.
static protected CosmosContainerProperties getCollectionDefinition() {
    return getCollectionDefinition(UUID.randomUUID().toString());
}
// Container definition with the given id, partitioned on the single path "/mypk".
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Container definition with a caller-supplied partition key definition.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Like getCollectionDefinition(String) but pins the partition-key definition to version V2 (hash V2).
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Range-indexed container definition that partitions on the document id itself.
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
// Range-indexed container definition on the default "/mypk" partition key path.
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
// Container definition (random id) with the given partition key paths and an
// indexing policy that includes every path under the root ("/*").
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    IncludedPath rootIncludedPath = new IncludedPath("/*");
    List<IncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(rootIncludedPath);
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setIncludedPaths(includedPaths);
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPath);
    CosmosContainerProperties containerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    containerProperties.setIndexingPolicy(indexingPolicy);
    return containerProperties;
}
// Deletes the container only when a query by id finds it; the database read() validates existence first.
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    database.read().block();
    // Test-only helper: the id is interpolated into the query text, so ids containing quotes are unsupported.
    List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
        .collectList()
        .block();
    if (!res.isEmpty()) {
        deleteCollection(database, collectionId);
    }
}
// Resolves the container handle, then deletes it synchronously.
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
    CosmosAsyncContainer container = cosmosDatabase.getContainer(collectionId);
    container.delete().block();
}
// Deletes the container synchronously.
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
    cosmosContainer.delete().block();
}
// Queries for the document by id (scoped to partition key == docId) and deletes it when found.
// NOTE(review): the lookup queries with PartitionKey(docId), but deleteDocument() below deletes with
// PartitionKey.NONE - confirm the containers used here actually partition on id, otherwise the delete
// may target the wrong logical partition.
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setPartitionKey(new PartitionKey(docId));
    CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
    List<InternalObjectNode> res = cosmosContainer
        .queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
        .byPage()
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .collectList().block();
    if (!res.isEmpty()) {
        deleteDocument(cosmosContainer, docId);
    }
}
// Best-effort delete: a 404 (document already gone) is swallowed, anything else propagates.
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
    } catch (Exception e) {
        CosmosException cosmosException = Utils.as(e, CosmosException.class);
        if (cosmosException == null || cosmosException.getStatusCode() != 404) {
            throw e;
        }
    }
}
// Deletes the item addressing the "none" logical partition.
// NOTE(review): only valid for documents without a partition-key value (or id-partitioned callers) - confirm.
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
// Deletes the user only when a query by id finds it.
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Reuse the local handle instead of re-resolving the database; read() still validates existence.
    database.read().block();
    List<CosmosUserProperties> res = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!res.isEmpty()) {
        deleteUser(database, userId);
    }
}
// Deletes the user synchronously.
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
// Idempotent create: best-effort delete of any pre-existing database with the same id, then create.
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
// Creates the database synchronously and returns a handle by id.
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties databaseProperties = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseProperties).block();
    return client.getDatabase(databaseProperties.getId());
}
// Creates a database with the sync client; returns null on failure.
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Route through the test logger instead of printStackTrace() so failures land in test output.
        logger.error("failed to create sync database {}", databaseId, e);
    }
    // NOTE(review): null return on failure is pre-existing behavior - callers must null-check.
    return null;
}
// Looks the database up by id; creates it only when the query comes back empty.
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> existing =
        client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
            .collectList()
            .block();
    if (!existing.isEmpty()) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        return database;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
// Best-effort cleanup: deletion failures (e.g. database already gone) must not fail the test,
// but are logged for diagnosis instead of being swallowed silently.
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            logger.warn("failed to delete database {}", database.getId(), e);
        }
    }
}
// Best-effort delete of a sync-client database handle; failures are logged, never rethrown.
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database != null) {
        try {
            logger.info("attempting to delete database ....");
            database.delete();
            logger.info("database deletion completed");
        } catch (Exception e) {
            logger.error("failed to delete sync database", e);
        }
    }
}
// Enumerates every container in the database and deletes each one synchronously.
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> allContainers = database.readAllContainers()
        .collectList()
        .block();
    for (CosmosContainerProperties containerProperties : allContainers) {
        database.getContainer(containerProperties.getId()).delete().block();
    }
}
// Best-effort container delete: 404 (already gone) is fine; other failures are logged but not rethrown.
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection != null) {
        try {
            logger.info("attempting to delete container {}.{}....",
                collection.getDatabase().getId(),
                collection.getId());
            collection.delete().block();
            logger.info("Container {}.{} deletion completed",
                collection.getDatabase().getId(),
                collection.getId());
        } catch (Exception e) {
            boolean shouldLogAsError = true;
            if (e instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) e;
                // 404 means the container is already gone, which satisfies "safe delete".
                if (cosmosException.getStatusCode() == 404) {
                    shouldLogAsError = false;
                    logger.info(
                        "Container {}.{} does not exist anymore.",
                        collection.getDatabase().getId(),
                        collection.getId());
                }
            }
            if (shouldLogAsError) {
                logger.error("failed to delete sync container {}.{}",
                    collection.getDatabase().getId(),
                    collection.getId(),
                    e);
            }
        } finally {
            // Brief pause before the next operation.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before surfacing; callers may rely on the interruption status.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }
}
// Null-tolerant wrapper over safeDeleteCollection(container); any failure is ignored.
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database != null && collectionId != null) {
        try {
            safeDeleteCollection(database.getContainer(collectionId));
        } catch (Exception e) {
            // Intentionally swallowed: cleanup must never fail the test.
        }
    }
}
// Closes the client on a fresh thread so the caller is never blocked by a slow shutdown.
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client != null) {
        new Thread(() -> {
            try {
                client.close();
            } catch (Exception e) {
                logger.error("failed to close client", e);
            }
        }).start();
    }
}
// Closes the client in-line; failures are logged, never rethrown.
static protected void safeClose(CosmosAsyncClient client) {
    if (client != null) {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}
// Sync-client variant of safeClose; failures are logged, never rethrown.
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client != null) {
        try {
            logger.info("closing client ...");
            client.close();
            logger.info("closing client completed");
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}
@SuppressWarnings("rawtypes")
// Convenience overload using the default subscriber validation timeout.
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    validateSuccess(single, validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
// Bridges the Mono into the Flux-based validator.
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}
@SuppressWarnings("rawtypes")
// Subscribes, waits up to `timeout` ms, and requires exactly one successful value, which is then validated.
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
    CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNoErrors();
    testSubscriber.assertComplete();
    testSubscriber.assertValueCount(1);
    validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
// NOTE(review): type parameter T is declared but unused; removing it could break callers passing
// explicit type arguments, so it is only flagged here.
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
// Expects the flux to terminate with exactly one error, which is handed to the validator.
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
    FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    // getEvents().get(1) is the error-event list in TestSubscriber's (values, errors, completions) triple.
    validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@SuppressWarnings("rawtypes")
// Subscribes to the item response, waits for completion, and asserts exactly one successful value.
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
// Expects the item response Mono to fail with exactly one error, which is handed to the validator.
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
    responseMono.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
// Convenience overload using the default subscriber validation timeout.
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
    FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
// Collects every emitted page, requires clean completion, then validates the full page list.
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}
// Runs the query once per page size, exercising continuation-token paging, and validates each page set.
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        List<FeedResponse<T>> receivedDocuments = queryWithContinuationTokens(query, container, pageSize, classType);
        validator.validate(receivedDocuments);
    }
}
// Drains a query page-by-page: each iteration re-issues the query with the previous page's
// continuation token and keeps only the first returned page, until the token is null.
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    List<String> continuationTokens = new ArrayList<String>();
    List<FeedResponse<T>> responseList = new ArrayList<>();
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
        continuationTokens.add(requestContinuation);
    } while (requestContinuation != null);
    return responseList;
}
    // Convenience overload using the default subscriberValidationTimeout.
    public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
        validateQueryFailure(flowable, validator, subscriberValidationTimeout);
    }

    // Subscribes to a paged query flux that is expected to FAIL, asserts it terminated with
    // exactly one error, and hands that error to the validator.
    public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
                                                FailureValidator validator, long timeout) {
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        flowable.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
        testSubscriber.assertNotComplete();
        testSubscriber.assertTerminated();
        // getEvents().get(1) is the list of onError notifications.
        assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
        validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
    }
    // Single gateway-mode SESSION client builder.
    // NOTE(review): currently identical to clientBuildersWithGateway — confirm whether this
    // provider was meant to include direct-mode builders as well.
    @DataProvider
    public static Object[][] clientBuilders() {
        return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
    }

    // Single gateway-mode SESSION client builder.
    @DataProvider
    public static Object[][] clientBuildersWithGateway() {
        return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
    }

    // SESSION consistency over both direct (TCP) and gateway connection modes.
    @DataProvider
    public static Object[][] clientBuildersWithSessionConsistency() {
        return new Object[][]{
            {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
            {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
        };
    }

    // SESSION consistency, direct (TCP) mode only.
    @DataProvider
    public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
        return new Object[][]{
            {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
        };
    }
static ConsistencyLevel parseConsistency(String consistency) {
if (consistency != null) {
consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
return ConsistencyLevel.valueOf(consistency);
}
logger.error("INVALID configured test consistency [{}].", consistency);
throw new IllegalStateException("INVALID configured test consistency " + consistency);
}
static List<String> parsePreferredLocation(String preferredLocations) {
if (StringUtils.isEmpty(preferredLocations)) {
return null;
}
try {
return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
});
} catch (Exception e) {
logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
}
}
static List<Protocol> parseProtocols(String protocols) {
if (StringUtils.isEmpty(protocols)) {
return null;
}
List<Protocol> protocolList = new ArrayList<>();
try {
List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
});
for(String protocol : protocolStrings) {
protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
}
return protocolList;
} catch (Exception e) {
logger.error("INVALID configured test protocols [{}].", protocols);
throw new IllegalStateException("INVALID configured test protocols " + protocols);
}
}
    // Direct-mode (all configured protocols) plus gateway, EVENTUAL consistency.
    @DataProvider
    public static Object[][] simpleClientBuildersWithDirect() {
        return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
    }

    // Direct-mode over HTTPS plus gateway.
    @DataProvider
    public static Object[][] simpleClientBuildersWithDirectHttps() {
        return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
    }

    // Direct-mode over TCP plus gateway.
    @DataProvider
    public static Object[][] simpleClientBuildersWithDirectTcp() {
        return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
    }

    // Direct-mode over TCP only (includeGateway=false).
    @DataProvider
    public static Object[][] simpleClientBuildersWithJustDirectTcp() {
        return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
    }
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
    // Direct (TCP) and gateway SESSION builders with throttling retries turned off
    // (retryOnThrottledRequests=false), for tests that assert 429 behavior.
    @DataProvider
    public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() {
        return new Object[][]{
            { createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
            { createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
        };
    }

    // Overload that defaults includeGateway=true and retryOnThrottledRequests=true.
    private static Object[][] simpleClientBuildersWithDirect(
        boolean contentResponseOnWriteEnabled,
        Protocol... protocols) {
        return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
    }
    // Builds one direct-mode builder per protocol (EVENTUAL consistency only), optionally
    // plus one gateway-mode SESSION builder, and wraps each as a TestNG data-provider row.
    private static Object[][] simpleClientBuildersWithDirect(
        boolean includeGateway,
        boolean contentResponseOnWriteEnabled,
        boolean retryOnThrottledRequests,
        Protocol... protocols) {
        logger.info("Max test consistency to use is [{}]", accountConsistency);
        List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
        // Multi-master only applies with preferred locations and SESSION account consistency.
        boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
        List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
        for (Protocol protocol : protocols) {
            testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
                consistencyLevel,
                protocol,
                isMultiMasterEnabled,
                preferredLocations,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests)));
        }
        // Log the effective configuration of every builder for test diagnostics.
        cosmosConfigurations.forEach(c -> {
            ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
            ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
            logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
                connectionPolicy.getConnectionMode(),
                consistencyLevel,
                extractConfigs(c).getProtocol()
            );
        });
        if (includeGateway) {
            cosmosConfigurations.add(
                createGatewayRxDocumentClient(
                    ConsistencyLevel.SESSION,
                    false,
                    null,
                    contentResponseOnWriteEnabled,
                    retryOnThrottledRequests));
        }
        // Each data-provider row is a single-element Object[] holding one builder.
        return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
    }
    // Direct-mode builders across all desired consistencies (all configured protocols) plus gateway.
    @DataProvider
    public static Object[][] clientBuildersWithDirect() {
        return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
    }

    // Same, restricted to HTTPS.
    @DataProvider
    public static Object[][] clientBuildersWithDirectHttps() {
        return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
    }

    // Same, restricted to TCP.
    @DataProvider
    public static Object[][] clientBuildersWithDirectTcp() {
        return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
    }

    // TCP across all desired consistencies, with content-response-on-write disabled.
    @DataProvider
    public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
        return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
    }
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
Object[][] clientBuildersWithDisabledContentResponseOnWrite =
clientBuildersWithDirectSession(false, true, Protocol.TCP);
Object[][] clientBuildersWithEnabledContentResponseOnWrite =
clientBuildersWithDirectSession(true, true, Protocol.TCP);
int length = clientBuildersWithDisabledContentResponseOnWrite.length
+ clientBuildersWithEnabledContentResponseOnWrite.length;
Object[][] clientBuilders = new Object[length][];
int index = 0;
for (int i = 0; i < clientBuildersWithDisabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithDisabledContentResponseOnWrite[i];
}
for (int i = 0; i < clientBuildersWithEnabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithEnabledContentResponseOnWrite[i];
}
return clientBuilders;
}
    // SESSION consistency, direct mode over all configured protocols, plus gateway.
    @DataProvider
    public static Object[][] clientBuildersWithDirectSession() {
        return clientBuildersWithDirectSession(true, true, toArray(protocols));
    }

    // Same as clientBuildersWithDirectSession, with one additional gateway builder pointed
    // at the compute-gateway emulator port.
    @DataProvider
    public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
        Object[][] originalProviders = clientBuildersWithDirectSession(
            true,
            true,
            toArray(protocols));
        List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
        Object[] injectedProviderParameters = new Object[1];
        CosmosClientBuilder builder = createGatewayRxDocumentClient(
            TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
            ConsistencyLevel.SESSION,
            false,
            null,
            true,
            true);
        injectedProviderParameters[0] = builder;
        providers.add(injectedProviderParameters);
        Object[][] array = new Object[providers.size()][];
        return providers.toArray(array);
    }

    // SESSION consistency, direct mode over TCP, plus gateway.
    @DataProvider
    public static Object[][] clientBuildersWithDirectTcpSession() {
        return clientBuildersWithDirectSession(true, true, Protocol.TCP);
    }

    // With no protocols supplied, the direct-mode loop produces nothing, so this yields
    // only the gateway SESSION builder.
    @DataProvider
    public static Object[][] simpleClientBuilderGatewaySession() {
        return clientBuildersWithDirectSession(true, true);
    }
static Protocol[] toArray(List<Protocol> protocols) {
return protocols.toArray(new Protocol[protocols.size()]);
}
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
return clientBuildersWithDirect(new ArrayList<ConsistencyLevel>() {{
add(ConsistencyLevel.SESSION);
}}, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
    // Direct-mode builders for every consistency level in desiredConsistencies (plus gateway,
    // added downstream).
    private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
        logger.info("Max test consistency to use is [{}]", accountConsistency);
        return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
    }
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
if (StringUtils.isEmpty(consistencies)) {
return null;
}
List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
try {
List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
for(String consistency : consistencyStrings) {
consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
}
return consistencyLevels;
} catch (Exception e) {
logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
}
}
    // Returns the given account consistency plus every weaker level. Relies on deliberate
    // switch fall-through: cases are ordered strongest to weakest, so entering at any level
    // accumulates that level and all levels below it.
    @SuppressWarnings("fallthrough")
    static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
        List<ConsistencyLevel> testConsistencies = new ArrayList<>();
        switch (accountConsistency) {
            case STRONG:
                testConsistencies.add(ConsistencyLevel.STRONG);
                // falls through
            case BOUNDED_STALENESS:
                testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
                // falls through
            case SESSION:
                testConsistencies.add(ConsistencyLevel.SESSION);
                // falls through
            case CONSISTENT_PREFIX:
                testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
                // falls through
            case EVENTUAL:
                testConsistencies.add(ConsistencyLevel.EVENTUAL);
                break;
            default:
                throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
        }
        return testConsistencies;
    }
    // Builds one direct-mode builder per (protocol x consistency), always appends one
    // gateway-mode SESSION builder, and wraps each as a TestNG data-provider row.
    private static Object[][] clientBuildersWithDirect(
        List<ConsistencyLevel> testConsistencies,
        boolean contentResponseOnWriteEnabled,
        boolean retryOnThrottledRequests,
        Protocol... protocols) {
        // Multi-master only applies with preferred locations and SESSION account consistency.
        boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
        List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
        for (Protocol protocol : protocols) {
            testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
                protocol,
                isMultiMasterEnabled,
                preferredLocations,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests)));
        }
        // Log the effective configuration of every builder for test diagnostics.
        cosmosConfigurations.forEach(c -> {
            ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
            ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
            logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
                connectionPolicy.getConnectionMode(),
                consistencyLevel,
                extractConfigs(c).getProtocol()
            );
        });
        cosmosConfigurations.add(
            createGatewayRxDocumentClient(
                ConsistencyLevel.SESSION,
                isMultiMasterEnabled,
                preferredLocations,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
        return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
    }
    // Gateway-mode SESSION builder for suite setup/teardown housekeeping, with a long
    // throttling-retry wait so setup survives transient 429s.
    static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
        ThrottlingRetryOptions options = new ThrottlingRetryOptions();
        options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
        GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
        return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .gatewayMode(gatewayConnectionConfig)
            .throttlingRetryOptions(options)
            .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
            .consistencyLevel(ConsistencyLevel.SESSION);
    }
    // Gateway-mode builder against the default test host.
    static protected CosmosClientBuilder createGatewayRxDocumentClient(
        ConsistencyLevel consistencyLevel,
        boolean multiMasterEnabled,
        List<String> preferredRegions,
        boolean contentResponseOnWriteEnabled,
        boolean retryOnThrottledRequests) {
        return createGatewayRxDocumentClient(
            TestConfigurations.HOST,
            consistencyLevel,
            multiMasterEnabled,
            preferredRegions,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests);
    }

    // Gateway-mode builder against an explicit endpoint. When retryOnThrottledRequests is
    // false, throttling (429) retries are disabled entirely.
    static protected CosmosClientBuilder createGatewayRxDocumentClient(
        String endpoint,
        ConsistencyLevel consistencyLevel,
        boolean multiMasterEnabled,
        List<String> preferredRegions,
        boolean contentResponseOnWriteEnabled,
        boolean retryOnThrottledRequests) {
        GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
        CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
            .credential(credential)
            .gatewayMode(gatewayConnectionConfig)
            .multipleWriteRegionsEnabled(multiMasterEnabled)
            .preferredRegions(preferredRegions)
            .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
            .consistencyLevel(consistencyLevel);
        // Eagerly materializes the connection policy on the builder via the internal accessor.
        ImplementationBridgeHelpers
            .CosmosClientBuilderHelper
            .getCosmosClientBuilderAccessor()
            .buildConnectionPolicy(builder);
        if (!retryOnThrottledRequests) {
            builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
        }
        return builder;
    }

    // Default gateway-mode SESSION builder.
    static protected CosmosClientBuilder createGatewayRxDocumentClient() {
        return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
    }
    // Direct-mode builder. The requested protocol is injected through a mocked Configs
    // (spy + stubbed getProtocol) since the public builder API does not expose it.
    static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
                                                                      Protocol protocol,
                                                                      boolean multiMasterEnabled,
                                                                      List<String> preferredRegions,
                                                                      boolean contentResponseOnWriteEnabled,
                                                                      boolean retryOnThrottledRequests) {
        CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .directMode(DirectConnectionConfig.getDefaultConfig())
            .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
            .consistencyLevel(consistencyLevel);
        if (preferredRegions != null) {
            builder.preferredRegions(preferredRegions);
        }
        // Multi-master writes only make sense together with SESSION consistency here.
        if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
            builder.multipleWriteRegionsEnabled(true);
        }
        if (!retryOnThrottledRequests) {
            builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
        }
        Configs configs = spy(new Configs());
        doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
        return injectConfigs(builder, configs);
    }
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1);
}
    // Tri-state query-metrics flag: enabled, disabled, and unset (null).
    @DataProvider(name = "queryMetricsArgProvider")
    public Object[][] queryMetricsArgProvider() {
        return new Object[][]{
            {true},
            {false},
            {null}
        };
    }

    // Pairs of (DISTINCT ORDER BY query, whether the order-by field is the distinct field).
    @DataProvider(name = "queryWithOrderByProvider")
    public Object[][] queryWithOrderBy() {
        return new Object[][]{
            { "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
            { "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
        };
    }

    // Deep-copies a builder so tests can mutate it without affecting the shared provider instance.
    public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
        return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
    }
public byte[] decodeHexString(String string) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (int i = 0; i < string.length(); i+=2) {
int b = Integer.parseInt(string.substring(i, i + 2), 16);
outputStream.write(b);
}
return outputStream.toByteArray();
}
} | public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() { | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
    // For weak consistencies (EVENTUAL / CONSISTENT_PREFIX) sleeps briefly so replicas can
    // catch up before reads are validated; stronger levels need no wait. The fall-through
    // into the no-op cases is deliberate.
    @SuppressWarnings({"fallthrough"})
    protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
        switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
            case EVENTUAL:
            case CONSISTENT_PREFIX:
                logger.info(" additional wait in EVENTUAL mode so the replica catch up");
                try {
                    TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
                } catch (Exception e) {
                    logger.error("unexpected failure", e);
                }
                // falls through
            case SESSION:
            case BOUNDED_STALENESS:
            case STRONG:
            default:
                break;
        }
    }
    // Creates a container with explicit manual throughput and returns a handle to it.
    public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                        CosmosContainerRequestOptions options, int throughput) {
        database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
        return database.getContainer(cosmosContainerProperties.getId());
    }

    // Creates a container with default throughput and returns a handle to it.
    public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                        CosmosContainerRequestOptions options) {
        database.createContainer(cosmosContainerProperties, options).block();
        return database.getContainer(cosmosContainerProperties.getId());
    }
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
final String NUMBER_FIELD = "numberField";
final String STRING_FIELD = "stringField";
final String NUMBER_FIELD_2 = "numberField2";
final String STRING_FIELD_2 = "stringField2";
final String BOOL_FIELD = "boolField";
final String NULL_FIELD = "nullField";
final String OBJECT_FIELD = "objectField";
final String ARRAY_FIELD = "arrayField";
final String SHORT_STRING_FIELD = "shortStringField";
final String MEDIUM_STRING_FIELD = "mediumStringField";
final String LONG_STRING_FIELD = "longStringField";
final String PARTITION_KEY = "pk";
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
ArrayList<String> partitionKeyPaths = new ArrayList<String>();
partitionKeyPaths.add("/" + PARTITION_KEY);
partitionKeyDefinition.setPaths(partitionKeyPaths);
CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<List<CompositePath>> compositeIndexes = new ArrayList<>();
ArrayList<CompositePath> compositeIndexSimple = new ArrayList<CompositePath>();
CompositePath compositePath1 = new CompositePath();
compositePath1.setPath("/" + NUMBER_FIELD);
compositePath1.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath2 = new CompositePath();
compositePath2.setPath("/" + STRING_FIELD);
compositePath2.setOrder(CompositePathSortOrder.DESCENDING);
compositeIndexSimple.add(compositePath1);
compositeIndexSimple.add(compositePath2);
ArrayList<CompositePath> compositeIndexMaxColumns = new ArrayList<CompositePath>();
CompositePath compositePath3 = new CompositePath();
compositePath3.setPath("/" + NUMBER_FIELD);
compositePath3.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath4 = new CompositePath();
compositePath4.setPath("/" + STRING_FIELD);
compositePath4.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath5 = new CompositePath();
compositePath5.setPath("/" + NUMBER_FIELD_2);
compositePath5.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath6 = new CompositePath();
compositePath6.setPath("/" + STRING_FIELD_2);
compositePath6.setOrder(CompositePathSortOrder.ASCENDING);
compositeIndexMaxColumns.add(compositePath3);
compositeIndexMaxColumns.add(compositePath4);
compositeIndexMaxColumns.add(compositePath5);
compositeIndexMaxColumns.add(compositePath6);
ArrayList<CompositePath> compositeIndexPrimitiveValues = new ArrayList<CompositePath>();
CompositePath compositePath7 = new CompositePath();
compositePath7.setPath("/" + NUMBER_FIELD);
compositePath7.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath8 = new CompositePath();
compositePath8.setPath("/" + STRING_FIELD);
compositePath8.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath9 = new CompositePath();
compositePath9.setPath("/" + BOOL_FIELD);
compositePath9.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath10 = new CompositePath();
compositePath10.setPath("/" + NULL_FIELD);
compositePath10.setOrder(CompositePathSortOrder.ASCENDING);
compositeIndexPrimitiveValues.add(compositePath7);
compositeIndexPrimitiveValues.add(compositePath8);
compositeIndexPrimitiveValues.add(compositePath9);
compositeIndexPrimitiveValues.add(compositePath10);
ArrayList<CompositePath> compositeIndexLongStrings = new ArrayList<CompositePath>();
CompositePath compositePath11 = new CompositePath();
compositePath11.setPath("/" + STRING_FIELD);
CompositePath compositePath12 = new CompositePath();
compositePath12.setPath("/" + SHORT_STRING_FIELD);
CompositePath compositePath13 = new CompositePath();
compositePath13.setPath("/" + MEDIUM_STRING_FIELD);
CompositePath compositePath14 = new CompositePath();
compositePath14.setPath("/" + LONG_STRING_FIELD);
compositeIndexLongStrings.add(compositePath11);
compositeIndexLongStrings.add(compositePath12);
compositeIndexLongStrings.add(compositePath13);
compositeIndexLongStrings.add(compositePath14);
compositeIndexes.add(compositeIndexSimple);
compositeIndexes.add(compositeIndexMaxColumns);
compositeIndexes.add(compositeIndexPrimitiveValues);
compositeIndexes.add(compositeIndexLongStrings);
indexingPolicy.setCompositeIndexes(compositeIndexes);
cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
return cosmosContainerProperties;
}
    // Creates a container in the named database and returns a handle to it.
    public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
        CosmosAsyncDatabase database = client.getDatabase(dbId);
        database.createContainer(collectionDefinition).block();
        return database.getContainer(collectionDefinition.getId());
    }

    // Deletes a container (blocking); fails if it does not exist.
    public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
        client.getDatabase(dbId).getContainer(collectionId).delete().block();
    }

    // Creates an item (blocking) and returns its server-side properties.
    public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
        return BridgeInternal.getProperties(cosmosContainer.createItem(item).block());
    }
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList,
int concurrencyLevel) {
List<Mono<CosmosItemResponse<T>>> result =
new ArrayList<>(documentDefinitionList.size());
for (T docDef : documentDefinitionList) {
result.add(cosmosContainer.createItem(docDef));
}
return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList) {
return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
.publishOn(Schedulers.parallel())
.map(itemResponse -> itemResponse.getItem())
.collectList()
.block();
}
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
.publishOn(Schedulers.parallel())
.then()
.block();
}
    // Creates a user in the named database and returns a handle to it.
    public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        CosmosUserResponse userResponse = database.createUser(userSettings).block();
        return database.getUser(userResponse.getProperties().getId());
    }

    // Delete-then-create, guaranteeing a fresh user regardless of prior state.
    public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
        deleteUserIfExists(client, databaseId, user.getId());
        return createUser(client, databaseId, user);
    }

    // Delete-then-create, guaranteeing a fresh container regardless of prior state.
    private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
        deleteCollectionIfExists(client, databaseId, collection.getId());
        return createCollection(client.getDatabase(databaseId), collection, options);
    }
    // Random-id container definition with the all-versions-and-deletes (full fidelity)
    // change feed policy and a 5-minute retention window.
    static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
        CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
        cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
        return cosmosContainerProperties;
    }

    // Random-id container definition partitioned on "/mypk".
    static protected CosmosContainerProperties getCollectionDefinition() {
        return getCollectionDefinition(UUID.randomUUID().toString());
    }

    // Container definition with the given id, partitioned on "/mypk".
    static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
        PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
        ArrayList<String> paths = new ArrayList<>();
        paths.add("/mypk");
        partitionKeyDef.setPaths(paths);
        CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
        return collectionDefinition;
    }

    // Container definition with a caller-supplied partition key definition.
    static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
        return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
    }
    // Container definition partitioned on "/mypk" using partition-key hash V2.
    static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
        PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
        ArrayList<String> paths = new ArrayList<>();
        paths.add("/mypk");
        partitionKeyDef.setPaths(paths);
        partitionKeyDef.setVersion(PartitionKeyDefinitionVersion.V2);
        CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
        return collectionDefinition;
    }

    // Range-indexed container definition partitioned on "/id".
    static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
        return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
    }

    // Range-indexed container definition partitioned on "/mypk".
    static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
        return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
    }

    // Random-id container definition with the given partition key path(s) and an indexing
    // policy that includes every path ("/*").
    static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
        PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
        partitionKeyDef.setPaths(partitionKeyPath);
        IndexingPolicy indexingPolicy = new IndexingPolicy();
        List<IncludedPath> includedPaths = new ArrayList<>();
        IncludedPath includedPath = new IncludedPath("/*");
        includedPaths.add(includedPath);
        indexingPolicy.setIncludedPaths(includedPaths);
        CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
        cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
        return cosmosContainerProperties;
    }
    // Deletes a container only when a query for its id finds it; no-op otherwise.
    public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
            .collectList()
            .block();
        if (!res.isEmpty()) {
            deleteCollection(database, collectionId);
        }
    }

    // Deletes a container by id (blocking); fails if it does not exist.
    public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
        cosmosDatabase.getContainer(collectionId).delete().block();
    }

    // Deletes a container via its handle (blocking); fails if it does not exist.
    public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
        cosmosContainer.delete().block();
    }
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(docId));
CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
List<InternalObjectNode> res = cosmosContainer
.queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
.byPage()
.flatMap(page -> Flux.fromIterable(page.getResults()))
.collectList().block();
if (!res.isEmpty()) {
deleteDocument(cosmosContainer, docId);
}
}
/**
 * Best-effort document delete: a 404 (document already gone) is tolerated;
 * any other failure propagates. No-op when container or id is null.
 */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
    } catch (Exception e) {
        CosmosException cosmosException = Utils.as(e, CosmosException.class);
        boolean isNotFound = cosmosException != null && cosmosException.getStatusCode() == 404;
        if (!isNotFound) {
            throw e;
        }
    }
}
// Deletes the document using PartitionKey.NONE, i.e. this targets documents
// stored without a partition key value. Blocks until the delete completes.
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
/**
 * Deletes the user with the given id, but only when a query finds it;
 * otherwise a no-op.
 */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Reuse the proxy created above instead of fetching a second one just to read()
    // (the original called client.getDatabase(databaseId).read() redundantly).
    database.read().block();
    List<CosmosUserProperties> res = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!res.isEmpty()) {
        deleteUser(database, userId);
    }
}

/** Deletes the user, blocking until the delete completes. */
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
/** Deletes any existing database with the same id, then (re)creates it and returns a proxy. */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    String id = databaseSettings.getId();
    safeDeleteDatabase(client.getDatabase(id));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(id);
}

/** Creates a database with the given id, blocking until done, and returns a proxy to it. */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties settings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(settings).block();
    return client.getDatabase(settings.getId());
}
/**
 * Creates a database with the given id using the sync client.
 *
 * @return the created database, or {@code null} when creation fails.
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Use the test logger instead of printStackTrace() so the failure shows up
        // in captured test output with full context.
        logger.error("failed to create database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns the database with the given id, creating it first when it does not exist.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    if (!res.isEmpty()) {
        // Database already exists; read() verifies it is reachable before returning.
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        return database;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Best-effort database delete used for test cleanup: failures (e.g. the
 * database is already gone) must never fail the test, so nothing is rethrown.
 */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // Still intentionally swallowed, but leave a trace for debugging
            // instead of a completely silent empty catch block.
            logger.debug("failed to delete database {}", database.getId(), e);
        }
    }
}
/** Best-effort synchronous database delete; failures are logged, never rethrown. */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database == null) {
        return;
    }
    try {
        logger.info("attempting to delete database ....");
        database.delete();
        logger.info("database deletion completed");
    } catch (Exception e) {
        logger.error("failed to delete sync database", e);
    }
}
/** Enumerates every container in the database and deletes them one by one (blocking). */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> containers = database.readAllContainers()
        .collectList()
        .block();
    for (CosmosContainerProperties containerProperties : containers) {
        database.getContainer(containerProperties.getId()).delete().block();
    }
}
/**
 * Best-effort container delete for cleanup: a 404 (already deleted) is logged
 * as info, any other failure as error; nothing is rethrown. Always pauses
 * briefly afterwards before continuing.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection == null) {
        return;
    }
    try {
        logger.info("attempting to delete container {}.{}....",
            collection.getDatabase().getId(),
            collection.getId());
        collection.delete().block();
        logger.info("Container {}.{} deletion completed",
            collection.getDatabase().getId(),
            collection.getId());
    } catch (Exception e) {
        boolean shouldLogAsError = true;
        if (e instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) e;
            if (cosmosException.getStatusCode() == 404) {
                shouldLogAsError = false;
                logger.info(
                    "Container {}.{} does not exist anymore.",
                    collection.getDatabase().getId(),
                    collection.getId());
            }
        }
        if (shouldLogAsError) {
            logger.error("failed to delete sync container {}.{}",
                collection.getDatabase().getId(),
                collection.getId(),
                e);
        }
    } finally {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating so callers (and thread
            // pools) can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}
/** Best-effort delete by id: delegates to the container overload and swallows any failure. */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database == null || collectionId == null) {
        return;
    }
    try {
        safeDeleteCollection(database.getContainer(collectionId));
    } catch (Exception ignored) {
        // intentionally swallowed - cleanup must never fail the test
    }
}
// Closes the client on a freshly spawned background thread (fire-and-forget):
// the caller does not wait for the close to finish and the thread is never joined.
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client != null) {
        new Thread(() -> {
            try {
                client.close();
            } catch (Exception e) {
                logger.error("failed to close client", e);
            }
        }).start();
    }
}
/** Closes the client synchronously; failures are logged and swallowed. */
static protected void safeClose(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    try {
        client.close();
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/** Closes the sync client, logging progress; failures are logged and swallowed. */
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client == null) {
        return;
    }
    try {
        logger.info("closing client ...");
        client.close();
        logger.info("closing client completed");
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/** Validates that the mono emits exactly one successful response within the default timeout. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    validateSuccess(single, validator, subscriberValidationTimeout);
}

/** Same as above with an explicit timeout in milliseconds. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}

/** Subscribes, waits up to {@code timeout} ms, asserts a single successful emission and validates it. */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
                                                              CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
/** Validates that the mono terminates with exactly one error, using the default timeout. */
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}

/** Subscribes, waits up to {@code timeout} ms, asserts termination with exactly one error and validates it. */
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
        FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    // Read the error through errors() - same accessor the assertion above uses -
    // instead of the unchecked getEvents().get(1).get(0) index-and-cast.
    validator.validate(testSubscriber.errors().get(0));
}
/** Validates that the item mono emits exactly one successful response within the default timeout. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
    responseMono.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNoErrors();
    testSubscriber.assertComplete();
    testSubscriber.assertValueCount(1);
    validator.validate(testSubscriber.values().get(0));
}

/** Validates that the item mono terminates with exactly one error and validates it. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
    responseMono.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    // Read the error through errors() rather than the unchecked
    // getEvents().get(1).get(0) index-and-cast.
    validator.validate(testSubscriber.errors().get(0));
}
/** Validates a successful query using the default subscriber timeout. */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}

/** Subscribes to the paged query, awaits completion, then validates all received pages. */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}
/** Runs the query once per page size, driving it via continuation tokens, and validates each page set. */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        validator.validate(queryWithContinuationTokens(query, container, pageSize, classType));
    }
}
// Drains the query one page at a time, re-issuing it with the previous page's
// continuation token until the token is null, and returns every page received.
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    List<String> continuationTokens = new ArrayList<String>();
    List<FeedResponse<T>> responseList = new ArrayList<>();
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        // Each iteration subscribes anew, resuming from the last continuation token.
        queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        // Only the first emitted page of this subscription is consumed; its token
        // drives the next iteration.
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
        continuationTokens.add(requestContinuation);
    } while (requestContinuation != null);
    return responseList;
}
/** Validates that the query fails, using the default subscriber timeout. */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
    validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}

/** Subscribes, awaits termination, asserts exactly one error and validates it. */
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
                                            FailureValidator validator, long timeout) {
    TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    // Use errors() for both the size assertion and the lookup, consistent with
    // validateFailure, instead of indexing into raw getEvents() and casting.
    assertThat(testSubscriber.errors()).hasSize(1);
    validator.validate(testSubscriber.errors().get(0));
}
// TestNG data providers: each row supplies a single CosmosClientBuilder argument.

// Default builders: gateway mode, SESSION consistency.
@DataProvider
public static Object[][] clientBuilders() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}

// Gateway-transport builders only.
@DataProvider
public static Object[][] clientBuildersWithGateway() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}

// SESSION consistency over both direct (TCP) and gateway transport.
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
        {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
    };
}

// SESSION consistency, direct (TCP) transport only.
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
    };
}
/**
 * Parses an UpperCamel consistency name (e.g. "BoundedStaleness") into a
 * ConsistencyLevel; throws IllegalStateException for null input.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/**
 * Parses a JSON array of region names into a list; returns null for empty input.
 *
 * @throws IllegalStateException (with the parse failure as cause) when the JSON is invalid.
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    try {
        return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
        });
    } catch (Exception e) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        // Preserve the underlying parse failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations, e);
    }
}
/**
 * Parses a JSON array of UpperCamel protocol names into Protocol values;
 * returns null for empty input.
 *
 * @throws IllegalStateException (with the parse failure as cause) when the JSON is invalid.
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    List<Protocol> protocolList = new ArrayList<>();
    try {
        List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        for (String protocol : protocolStrings) {
            protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
        }
        return protocolList;
    } catch (Exception e) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        // Preserve the underlying failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test protocols " + protocols, e);
    }
}
// Simple (EVENTUAL-consistency) builder providers over the configured protocols.
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
    return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}

// Direct transport over HTTPS only.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}

// Direct transport over TCP only (gateway builder also included).
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}

// TCP only, without the extra gateway builder (first arg = includeGateway).
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}

// NOTE(review): despite the name, this passes includeGateway=false and
// contentResponseOnWriteEnabled=true - identical to the provider above; confirm intent.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}

// Builders with throttling retries disabled (last arg = retryOnThrottledRequests).
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
    return new Object[][]{
        { createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
        { createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
    };
}
// Convenience overload: gateway included and throttling retries enabled.
private static Object[][] simpleClientBuildersWithDirect(
    boolean contentResponseOnWriteEnabled,
    Protocol... protocols) {
    return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}

// Builds one direct-mode client builder per protocol (always EVENTUAL consistency
// here, regardless of the account consistency - see testConsistencies below),
// optionally appending a gateway-mode builder.
private static Object[][] simpleClientBuildersWithDirect(
    boolean includeGateway,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    // Only EVENTUAL is exercised by the "simple" providers.
    List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
    // Multi-master only when preferred locations are configured and the account is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
    for (Protocol protocol : protocols) {
        testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
            consistencyLevel,
            protocol,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests)));
    }
    // Log each configuration for test-run diagnostics.
    cosmosConfigurations.forEach(c -> {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
        ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            consistencyLevel,
            extractConfigs(c).getProtocol()
        );
    });
    if (includeGateway) {
        cosmosConfigurations.add(
            createGatewayRxDocumentClient(
                ConsistencyLevel.SESSION,
                false,
                null,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
    }
    // One provider row per builder.
    return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// All-consistency direct-mode providers over the configured protocols.
@DataProvider
public static Object[][] clientBuildersWithDirect() {
    return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}

// All consistencies, direct transport over HTTPS.
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}

// All consistencies, direct transport over TCP.
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}

// Same, but with contentResponseOnWriteEnabled=false (first arg).
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
Object[][] clientBuildersWithDisabledContentResponseOnWrite =
clientBuildersWithDirectSession(false, true, Protocol.TCP);
Object[][] clientBuildersWithEnabledContentResponseOnWrite =
clientBuildersWithDirectSession(true, true, Protocol.TCP);
int length = clientBuildersWithDisabledContentResponseOnWrite.length
+ clientBuildersWithEnabledContentResponseOnWrite.length;
Object[][] clientBuilders = new Object[length][];
int index = 0;
for (int i = 0; i < clientBuildersWithDisabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithDisabledContentResponseOnWrite[i];
}
for (int i = 0; i < clientBuildersWithEnabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithEnabledContentResponseOnWrite[i];
}
return clientBuilders;
}
// SESSION-consistency direct-mode providers over the configured protocols.
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
    return clientBuildersWithDirectSession(true, true, toArray(protocols));
}

// Same as above, plus one extra gateway builder pointed at the compute-gateway
// emulator port instead of the routing gateway port.
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
    Object[][] originalProviders = clientBuildersWithDirectSession(
        true,
        true,
        toArray(protocols));
    List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
    Object[] injectedProviderParameters = new Object[1];
    CosmosClientBuilder builder = createGatewayRxDocumentClient(
        TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
        ConsistencyLevel.SESSION,
        false,
        null,
        true,
        true);
    injectedProviderParameters[0] = builder;
    providers.add(injectedProviderParameters);
    Object[][] array = new Object[providers.size()][];
    return providers.toArray(array);
}

// SESSION consistency, TCP only.
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
    return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}

// NOTE(review): no protocols are passed here, so only the gateway builder is
// produced by the downstream helper - confirm that matches the provider's name.
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
    return clientBuildersWithDirectSession(true, true);
}
/** Converts the protocol list into an array. */
static Protocol[] toArray(List<Protocol> protocols) {
    // Passing a zero-length array is the idiomatic form and lets the JVM
    // allocate the correctly-sized result in one step.
    return protocols.toArray(new Protocol[0]);
}
/** SESSION-only variant of clientBuildersWithDirect. */
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    // Avoid the double-brace ArrayList idiom (creates an anonymous subclass
    // holding an enclosing-class reference) for a one-element list.
    return clientBuildersWithDirect(
        Arrays.asList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}

/** Builds providers for every consistency configured in {@code desiredConsistencies}. */
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses a JSON array of UpperCamel consistency names into ConsistencyLevel
 * values; returns null for empty input.
 *
 * @throws IllegalStateException (with the parse failure as cause) when the JSON is invalid.
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
    try {
        List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        for (String consistency : consistencyStrings) {
            consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
        }
        return consistencyLevels;
    } catch (Exception e) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        // Preserve the underlying failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies, e);
    }
}
// Returns the given account consistency level plus every weaker level below it.
// The switch relies on INTENTIONAL case fallthrough (hence the suppression):
// starting at the account's level, execution falls into every weaker case.
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
    List<ConsistencyLevel> testConsistencies = new ArrayList<>();
    switch (accountConsistency) {
        case STRONG:
            testConsistencies.add(ConsistencyLevel.STRONG);
            // falls through
        case BOUNDED_STALENESS:
            testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
            // falls through
        case SESSION:
            testConsistencies.add(ConsistencyLevel.SESSION);
            // falls through
        case CONSISTENT_PREFIX:
            testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
            // falls through
        case EVENTUAL:
            testConsistencies.add(ConsistencyLevel.EVENTUAL);
            break;
        default:
            throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
    }
    return testConsistencies;
}
// Builds one direct-mode builder per (protocol x consistency) combination, logs
// each configuration, and always appends one gateway-mode SESSION builder.
private static Object[][] clientBuildersWithDirect(
    List<ConsistencyLevel> testConsistencies,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    // Multi-master only when preferred locations are configured and the account is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
    for (Protocol protocol : protocols) {
        testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
            protocol,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests)));
    }
    // Log each configuration for test-run diagnostics.
    cosmosConfigurations.forEach(c -> {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
        ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            consistencyLevel,
            extractConfigs(c).getProtocol()
        );
    });
    // Unlike the "simple" variant, the gateway builder is always appended here.
    cosmosConfigurations.add(
        createGatewayRxDocumentClient(
            ConsistencyLevel.SESSION,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests));
    return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/**
 * Gateway-mode builder for housekeeping (setup/teardown) operations: uses a
 * long throttling-retry window and SESSION consistency.
 */
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
    ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions();
    retryOptions.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
    return new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode(new GatewayConnectionConfig())
        .throttlingRetryOptions(retryOptions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(ConsistencyLevel.SESSION);
}
/** Gateway builder against the default test host; delegates to the endpoint overload. */
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    return createGatewayRxDocumentClient(
        TestConfigurations.HOST,
        consistencyLevel,
        multiMasterEnabled,
        preferredRegions,
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests);
}
// Builds a fully configured gateway-mode client builder against the given endpoint.
// buildConnectionPolicy is invoked through the internal accessor; throttling
// retries are disabled by setting max attempts to 0 when requested.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    String endpoint,
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
        .credential(credential)
        .gatewayMode(gatewayConnectionConfig)
        .multipleWriteRegionsEnabled(multiMasterEnabled)
        .preferredRegions(preferredRegions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    // NOTE(review): this eagerly materializes the connection policy via an
    // internal bridge - presumably so later inspection/cloning sees it; confirm.
    ImplementationBridgeHelpers
        .CosmosClientBuilderHelper
        .getCosmosClientBuilderAccessor()
        .buildConnectionPolicy(builder);
    if (!retryOnThrottledRequests) {
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    return builder;
}
// Default gateway builder: SESSION consistency, single-master, no preferred
// regions, content response on write enabled, throttling retries enabled.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
    return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
// Builds a direct-mode client builder; the transport protocol is forced via a
// Mockito spy on Configs whose getProtocol() answer returns the requested value,
// injected into the builder through injectConfigs.
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
                                                                  Protocol protocol,
                                                                  boolean multiMasterEnabled,
                                                                  List<String> preferredRegions,
                                                                  boolean contentResponseOnWriteEnabled,
                                                                  boolean retryOnThrottledRequests) {
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .directMode(DirectConnectionConfig.getDefaultConfig())
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    if (preferredRegions != null) {
        builder.preferredRegions(preferredRegions);
    }
    // Multi-master is only honored together with SESSION consistency.
    if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
        builder.multipleWriteRegionsEnabled(true);
    }
    if (!retryOnThrottledRequests) {
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    Configs configs = spy(new Configs());
    doAnswer((Answer<Protocol>) invocation -> protocol).when(configs).getProtocol();
    return injectConfigs(builder, configs);
}
// Ceiling division: the number of pages needed to hold totalExpectedResult items
// at maxPageSize per page; always at least 1 (an empty result still yields one page).
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    return Math.max((totalExpectedResult + maxPageSize - 1) / maxPageSize, 1);
}
// Provider covering all three query-metrics flag states: on, off, and unset.
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
    return new Object[][]{
        {true},
        {false},
        {null}
    };
}

// Provider pairing a DISTINCT ORDER BY query with whether ordering field and
// distinct field coincide.
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
    return new Object[][]{
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
    };
}
// Returns an independent copy of the builder via the internal clone bridge,
// so tests can tweak a configuration without mutating the shared one.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
    return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
/**
 * Decodes a hex string (two hex characters per byte, e.g. "0AFF") into bytes.
 *
 * @throws IllegalArgumentException when the input has odd length.
 * @throws NumberFormatException when a character is not a valid hex digit.
 */
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        // Fail with a clear message instead of the StringIndexOutOfBoundsException
        // the old substring-based loop threw on the trailing half-pair.
        throw new IllegalArgumentException("hex string must have even length: " + string);
    }
    byte[] bytes = new byte[string.length() / 2];
    for (int i = 0; i < bytes.length; i++) {
        // parseInt of "FF" yields 255; the byte cast keeps the low 8 bits, matching
        // the previous ByteArrayOutputStream.write(int) behavior.
        bytes[i] = (byte) Integer.parseInt(string.substring(2 * i, 2 * i + 2), 16);
    }
    return bytes;
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
// Factory for the test DatabaseManager implementation backed by the given async client.
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
    return new DatabaseManagerImpl(client);
}

// The async client every operation delegates to.
private final CosmosAsyncClient client;

private DatabaseManagerImpl(CosmosAsyncClient client) {
    this.client = client;
}

// Delegates the database query straight to the wrapped client (default options).
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
    return client.queryDatabases(query, null);
}

// Delegates database creation to the wrapped client.
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
    return client.createDatabase(databaseDefinition);
}

// Returns a proxy to the database with the given id.
@Override
public CosmosAsyncDatabase getDatabase(String id) {
    return client.getDatabase(id);
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
// Factory for the test DatabaseManager implementation backed by the given async client.
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
    return new DatabaseManagerImpl(client);
}

// The async client every operation delegates to.
private final CosmosAsyncClient client;

private DatabaseManagerImpl(CosmosAsyncClient client) {
    this.client = client;
}

// Delegates the database query straight to the wrapped client (default options).
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
    return client.queryDatabases(query, null);
}

// Delegates database creation to the wrapped client.
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
    return client.createDatabase(databaseDefinition);
}

// Returns a proxy to the database with the given id.
@Override
public CosmosAsyncDatabase getDatabase(String id) {
    return client.getDatabase(id);
}
} |
make sense, will update | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
// For weak consistency levels (EVENTUAL, CONSISTENT_PREFIX) sleep briefly so
// replicas can catch up before the test reads; stronger levels need no wait.
// EVENTUAL intentionally falls through into CONSISTENT_PREFIX, and the sleep
// block falls through into the no-op cases (hence @SuppressWarnings).
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
    switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
        case EVENTUAL:
            // falls through
        case CONSISTENT_PREFIX:
            logger.info(" additional wait in EVENTUAL mode so the replica catch up");
            try {
                TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
            } catch (Exception e) {
                logger.error("unexpected failure", e);
            }
            // falls through - no further action for the remaining levels
        case SESSION:
        case BOUNDED_STALENESS:
        case STRONG:
        default:
            break;
    }
}
/** Creates a container with manual throughput, blocking until done, and returns a proxy to it. */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options, int throughput) {
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(throughput);
    database.createContainer(cosmosContainerProperties, throughputProperties, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}

/** Creates a container with default throughput, blocking until done, and returns a proxy to it. */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options) {
    database.createContainer(cosmosContainerProperties, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}
// Container definition partitioned on "/pk" whose indexing policy registers four
// composite indexes: a simple two-column index, a four-column index, a
// primitive-value-kinds index and a string-length index.
// NOTE(review): despite the method name, no spatial indexes are configured here —
// confirm whether that is intentional.
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(new ArrayList<>(Collections.singletonList("/pk")));
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    // simple two-column index with mixed sort orders
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/numberField", CompositePathSortOrder.ASCENDING),
        newCompositePath("/stringField", CompositePathSortOrder.DESCENDING))));
    // maximum-columns index (four columns, alternating orders)
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/numberField", CompositePathSortOrder.DESCENDING),
        newCompositePath("/stringField", CompositePathSortOrder.ASCENDING),
        newCompositePath("/numberField2", CompositePathSortOrder.DESCENDING),
        newCompositePath("/stringField2", CompositePathSortOrder.ASCENDING))));
    // index across all primitive value kinds (number, string, bool, null)
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/numberField", CompositePathSortOrder.DESCENDING),
        newCompositePath("/stringField", CompositePathSortOrder.ASCENDING),
        newCompositePath("/boolField", CompositePathSortOrder.DESCENDING),
        newCompositePath("/nullField", CompositePathSortOrder.ASCENDING))));
    // string-length index: no explicit sort order set, service default applies
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/stringField", null),
        newCompositePath("/shortStringField", null),
        newCompositePath("/mediumStringField", null),
        newCompositePath("/longStringField", null))));

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

// Builds one composite-path entry; a null order leaves the service-side default untouched
// (matches the original code, which only called setOrder for the first three indexes).
private static CompositePath newCompositePath(String path, CompositePathSortOrder order) {
    CompositePath compositePath = new CompositePath();
    compositePath.setPath(path);
    if (order != null) {
        compositePath.setOrder(order);
    }
    return compositePath;
}
/** Creates the container in the named database and returns a proxy to it; blocks until done. */
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase targetDatabase = client.getDatabase(dbId);
    targetDatabase.createContainer(collectionDefinition).block();
    return targetDatabase.getContainer(collectionDefinition.getId());
}

/** Deletes the named container; blocks until the service acknowledges. */
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    CosmosAsyncContainer target = client.getDatabase(dbId).getContainer(collectionId);
    target.delete().block();
}

/** Inserts the item and returns the server-side representation of the created document. */
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    CosmosItemResponse<InternalObjectNode> response = cosmosContainer.createItem(item).block();
    return BridgeInternal.getProperties(response);
}
/**
 * Issues one createItem call per document and merges the responses with the
 * requested concurrency level. Nothing executes until the returned Flux is subscribed.
 */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                                  List<T> documentDefinitionList,
                                                  int concurrencyLevel) {
    List<Mono<CosmosItemResponse<T>>> pendingInserts = new ArrayList<>(documentDefinitionList.size());
    documentDefinitionList.forEach(docDef -> pendingInserts.add(cosmosContainer.createItem(docDef)));
    return Flux.merge(Flux.fromIterable(pendingInserts), concurrencyLevel);
}

/** Inserts all documents with the default concurrency, blocking until done; returns the created items. */
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}

/** Inserts all documents with the default concurrency, blocking until done; responses are discarded. */
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .then()
        .block();
}
/** Creates the user in the named database and returns a proxy to it; blocks until done. */
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    String createdUserId = database.createUser(userSettings).block().getProperties().getId();
    return database.getUser(createdUserId);
}

/** Removes any pre-existing user with the same id, then (re)creates it. */
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
    deleteUserIfExists(client, databaseId, user.getId());
    return createUser(client, databaseId, user);
}

/** Removes any pre-existing container with the same id, then (re)creates it with the given options. */
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
    deleteCollectionIfExists(client, databaseId, collection.getId());
    return createCollection(client.getDatabase(databaseId), collection, options);
}
/** Container definition with an all-versions-and-deletes change feed policy (5 minute retention). */
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
    CosmosContainerProperties properties = getCollectionDefinition(UUID.randomUUID().toString());
    properties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
    return properties;
}

/** Container definition with a random id, partitioned on "/mypk". */
static protected CosmosContainerProperties getCollectionDefinition() {
    String randomCollectionId = UUID.randomUUID().toString();
    return getCollectionDefinition(randomCollectionId);
}

/** Container definition with the given id, partitioned on "/mypk". */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(new ArrayList<>(Collections.singletonList("/mypk")));
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}

/** Container definition with the given id and an explicit partition key definition. */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}

/** Container definition partitioned on "/mypk" using the V2 partition key definition. */
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(new ArrayList<>(Collections.singletonList("/mypk")));
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/** Range-index container definition partitioned on "/id". */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}

/** Range-index container definition partitioned on "/mypk". */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}

/**
 * Container definition with a random id, the given partition key paths, and an
 * indexing policy that includes every path ("/*").
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPath);
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setIncludedPaths(new ArrayList<>(Collections.singletonList(new IncludedPath("/*"))));
    CosmosContainerProperties properties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    properties.setIndexingPolicy(indexingPolicy);
    return properties;
}
/** Deletes the container if a container with that id exists in the database. */
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    database.read().block();
    String query = String.format("SELECT * FROM root r where r.id = '%s'", collectionId);
    boolean exists = !database.queryContainers(query, null).collectList().block().isEmpty();
    if (exists) {
        deleteCollection(database, collectionId);
    }
}

/** Deletes the named container; blocks until the service acknowledges. */
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
    cosmosDatabase.getContainer(collectionId).delete().block();
}

/** Deletes the given container; blocks until the service acknowledges. */
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
    cosmosContainer.delete().block();
}
/** Deletes the document with the given id if a matching document exists (id doubles as partition key). */
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
    CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions();
    queryOptions.setPartitionKey(new PartitionKey(docId));
    CosmosAsyncContainer container = client.getDatabase(databaseId).getContainer(collectionId);
    String query = String.format("SELECT * FROM root r where r.id = '%s'", docId);
    List<InternalObjectNode> matches = container
        .queryItems(query, queryOptions, InternalObjectNode.class)
        .byPage()
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .collectList().block();
    if (!matches.isEmpty()) {
        deleteDocument(container, docId);
    }
}

/** Deletes the document, treating a 404 (already gone) as success; other failures propagate. */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        boolean alreadyGone = dce != null && dce.getStatusCode() == 404;
        if (!alreadyGone) {
            throw e;
        }
    }
}

/** Deletes the document using the NONE partition key; blocks until acknowledged. */
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
/** Deletes the user with the given id if one exists in the database. */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // reuse the handle created above instead of resolving the database a second time
    // (the original called client.getDatabase(databaseId).read() redundantly)
    database.read().block();
    List<CosmosUserProperties> res = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!res.isEmpty()) {
        deleteUser(database, userId);
    }
}

/** Deletes the given user; blocks until the service acknowledges. */
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
/** Drops any pre-existing database with the same id, then (re)creates it. */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}

/** Creates a database with the given id and returns a proxy to it; fails if it already exists. */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}

/** Sync variant of createDatabase. Returns null on failure (callers must tolerate this). */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // was e.printStackTrace(): route through the test logger so the failure shows
        // up in test output with context instead of on raw stderr
        logger.error("failed to create database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns a proxy to the database with the given id, creating it first when no
 * database with that id exists.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    // isEmpty() instead of size() != 0, matching the style used elsewhere in this class
    if (!res.isEmpty()) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        // verify the database is actually readable before handing it out
        database.read().block();
        return database;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/** Deletes the database if present; any failure (e.g. already deleted) is logged at debug and swallowed. */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // best effort - the original swallowed this silently; log at debug so
            // unexpected teardown failures stay diagnosable without being noisy
            logger.debug("safeDeleteDatabase ignored failure", e);
        }
    }
}

/** Sync variant; failures are logged as errors but never propagated. */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database != null) {
        try {
            logger.info("attempting to delete database ....");
            database.delete();
            logger.info("database deletion completed");
        } catch (Exception e) {
            logger.error("failed to delete sync database", e);
        }
    }
}
/** Deletes every container currently present in the database; null database is a no-op. */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    for (CosmosContainerProperties collection : database.readAllContainers().collectList().block()) {
        database.getContainer(collection.getId()).delete().block();
    }
}
/**
 * Deletes the container, treating 404 (already gone) as success; any other failure is
 * logged as an error but not rethrown. Always pauses 100ms afterwards before returning.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection != null) {
        try {
            logger.info("attempting to delete container {}.{}....",
                collection.getDatabase().getId(),
                collection.getId());
            collection.delete().block();
            logger.info("Container {}.{} deletion completed",
                collection.getDatabase().getId(),
                collection.getId());
        } catch (Exception e) {
            boolean shouldLogAsError = true;
            if (e instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) e;
                if (cosmosException.getStatusCode() == 404) {
                    // container already gone - not an error for a "safe" delete
                    shouldLogAsError = false;
                    logger.info(
                        "Container {}.{} does not exist anymore.",
                        collection.getDatabase().getId(),
                        collection.getId());
                }
            }
            if (shouldLogAsError) {
                logger.error("failed to delete sync container {}.{}",
                    collection.getDatabase().getId(),
                    collection.getId(),
                    e);
            }
        } finally {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // restore the interrupt flag before propagating so callers and
                // executors can still observe the interruption (the original
                // wrapped-and-threw without re-interrupting)
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }
}
/** Best-effort container delete by id; null arguments and all failures are ignored. */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database == null || collectionId == null) {
        return;
    }
    try {
        safeDeleteCollection(database.getContainer(collectionId));
    } catch (Exception ignored) {
        // best effort - failures are already logged inside the single-argument overload
    }
}
/** Closes the client on a freshly spawned thread so the caller is never blocked by the close. */
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    Runnable closeTask = () -> {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    };
    new Thread(closeTask).start();
}

/** Closes the client, logging (not propagating) any failure; null client is a no-op. */
static protected void safeClose(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    try {
        client.close();
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}

/** Closes the sync client, logging (not propagating) any failure; null client is a no-op. */
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client == null) {
        return;
    }
    try {
        logger.info("closing client ...");
        client.close();
        logger.info("closing client completed");
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/** Overload using the default subscriber validation timeout. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    validateSuccess(single, validator, subscriberValidationTimeout);
}

/** Overload adapting the mono to a flux. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}

/** Asserts the flux completes with exactly one value and runs the validator against it. */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
                                                              CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
/** Overload using the default subscriber validation timeout. */
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}

/** Asserts the flux terminates with exactly one error and runs the validator against it. */
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
                                                                                  FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
/** Asserts the item-response mono completes with exactly one response accepted by the validator. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}

/** Asserts the item-response mono terminates with exactly one error accepted by the validator. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
/** Overload using the default subscriber validation timeout. */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}

/** Drains the paged flux to completion and hands all received pages to the validator. */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}

/** Runs the query once per page size, driving each run via continuation tokens, and validates the pages. */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        validator.validate(queryWithContinuationTokens(query, container, pageSize, classType));
    }
}
/**
 * Executes the query one page at a time, restarting from the previous continuation
 * token on every iteration, and returns all pages received. Each page fetch asserts
 * clean completion before the continuation is advanced.
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    List<String> continuationTokens = new ArrayList<>();
    List<FeedResponse<T>> pages = new ArrayList<>();
    String continuation = null;
    while (true) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> pagedFlux = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
        pagedFlux.byPage(continuation, pageSize).subscribe(subscriber);
        subscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        subscriber.assertNoErrors();
        subscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> page = (FeedResponse<T>) subscriber.getEvents().get(0).get(0);
        continuation = page.getContinuationToken();
        pages.add(page);
        continuationTokens.add(continuation);
        if (continuation == null) {
            break;
        }
    }
    return pages;
}
/** Overload using the default subscriber validation timeout. */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
    validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}

/** Asserts the paged flux terminates with exactly one error and runs the validator against it. */
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
                                            FailureValidator validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.getEvents().get(1)).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
/** Default builder set: a single gateway-mode, SESSION-consistency client. */
@DataProvider
public static Object[][] clientBuilders() {
    CosmosClientBuilder gatewaySession = createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
    return new Object[][]{{gatewaySession}};
}

/** Gateway-only builder set, SESSION consistency. */
@DataProvider
public static Object[][] clientBuildersWithGateway() {
    CosmosClientBuilder gatewaySession = createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
    return new Object[][]{{gatewaySession}};
}

/** One direct-TCP and one gateway builder, both SESSION consistency. */
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
    CosmosClientBuilder directSession = createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true);
    CosmosClientBuilder gatewaySession = createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
    return new Object[][]{{directSession}, {gatewaySession}};
}

/** Direct-TCP SESSION-consistency builder only. */
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
    CosmosClientBuilder directSession = createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true);
    return new Object[][]{{directSession}};
}
/**
 * Maps an UpperCamel consistency name (e.g. "BoundedStaleness") onto the enum.
 * A null input is rejected with an IllegalStateException after being logged.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/** Parses the configured preferred-locations JSON array; returns null when unset. */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    try {
        return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
        });
    } catch (Exception e) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        // chain the parse failure so the root cause is not lost (was previously dropped)
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations, e);
    }
}

/** Parses the configured protocols JSON array (UpperCamel names) into Protocol values; returns null when unset. */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    List<Protocol> protocolList = new ArrayList<>();
    try {
        List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        for (String protocol : protocolStrings) {
            protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
        }
        return protocolList;
    } catch (Exception e) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        // chain the parse failure so the root cause is not lost (was previously dropped)
        throw new IllegalStateException("INVALID configured test protocols " + protocols, e);
    }
}
/** All configured protocols, EVENTUAL consistency plus a gateway builder. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
    return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}

/** HTTPS direct mode only. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}

/** TCP direct mode only. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}

/** TCP direct mode without the gateway builder. */
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}

@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    // NOTE(review): despite this provider's name, the call passes
    // contentResponseOnWriteEnabled=true (second argument) and excludes the gateway —
    // confirm whether that is intentional before relying on the name.
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}

/** One direct-TCP and one gateway builder with throttled-request retries disabled. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() {
    CosmosClientBuilder directNoRetry = createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false);
    CosmosClientBuilder gatewayNoRetry = createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false);
    return new Object[][]{{directNoRetry}, {gatewayNoRetry}};
}
/** Convenience overload: gateway included, throttled-request retries enabled. */
private static Object[][] simpleClientBuildersWithDirect(
    boolean contentResponseOnWriteEnabled,
    Protocol... protocols) {
    return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}

/**
 * Builds one direct-mode client builder per requested protocol at EVENTUAL consistency,
 * optionally appends a SESSION-consistency gateway builder, and wraps each builder as a
 * single-element TestNG parameter row.
 */
private static Object[][] simpleClientBuildersWithDirect(
    boolean includeGateway,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> builders = new ArrayList<>();
    for (Protocol protocol : protocols) {
        for (ConsistencyLevel consistencyLevel : testConsistencies) {
            builders.add(createDirectRxDocumentClient(
                consistencyLevel,
                protocol,
                isMultiMasterEnabled,
                preferredLocations,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
        }
    }
    for (CosmosClientBuilder builder : builders) {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(builder);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            CosmosBridgeInternal.getConsistencyLevel(builder),
            extractConfigs(builder).getProtocol()
        );
    }
    if (includeGateway) {
        builders.add(
            createGatewayRxDocumentClient(
                ConsistencyLevel.SESSION,
                false,
                null,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
    }
    return builders.stream().map(b -> new Object[]{b}).toArray(Object[][]::new);
}
/** All configured protocols, all equal-or-lower consistency levels. */
@DataProvider
public static Object[][] clientBuildersWithDirect() {
    return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}

/** HTTPS direct mode, all equal-or-lower consistency levels. */
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}

/** TCP direct mode, all equal-or-lower consistency levels. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}

/** TCP direct mode with contentResponseOnWrite disabled. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
/**
 * Concatenation of the direct-session builder sets with contentResponseOnWrite
 * disabled and enabled, in that order.
 */
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] withDisabledContentResponseOnWrite =
        clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] withEnabledContentResponseOnWrite =
        clientBuildersWithDirectSession(true, true, Protocol.TCP);
    // System.arraycopy replaces the original hand-rolled index-tracking copy loops
    Object[][] clientBuilders =
        new Object[withDisabledContentResponseOnWrite.length + withEnabledContentResponseOnWrite.length][];
    System.arraycopy(withDisabledContentResponseOnWrite, 0, clientBuilders, 0,
        withDisabledContentResponseOnWrite.length);
    System.arraycopy(withEnabledContentResponseOnWrite, 0, clientBuilders,
        withDisabledContentResponseOnWrite.length, withEnabledContentResponseOnWrite.length);
    return clientBuilders;
}
/** Direct-session builders for every configured protocol (plus gateway). */
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
    return clientBuildersWithDirectSession(true, true, toArray(protocols));
}

/** Same as clientBuildersWithDirectSession, plus one gateway builder aimed at the compute-gateway emulator port. */
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
    Object[][] baseProviders = clientBuildersWithDirectSession(true, true, toArray(protocols));
    CosmosClientBuilder computeGatewayBuilder = createGatewayRxDocumentClient(
        TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
        ConsistencyLevel.SESSION,
        false,
        null,
        true,
        true);
    List<Object[]> providers = new ArrayList<>(Arrays.asList(baseProviders));
    providers.add(new Object[]{computeGatewayBuilder});
    return providers.toArray(new Object[0][]);
}

/** Direct-session builders restricted to the TCP protocol (plus gateway). */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
    return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}

/** Direct-session builders with no protocols requested — yields only the gateway builder. */
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
    return clientBuildersWithDirectSession(true, true);
}

/** Converts the protocol list into the varargs array form the builder helpers expect. */
static Protocol[] toArray(List<Protocol> protocols) {
    return protocols.toArray(new Protocol[0]);
}
/** Builders limited to SESSION consistency for each requested protocol (plus gateway). */
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    // Collections.singletonList replaces the double-brace ArrayList initializer,
    // which created an anonymous subclass retaining a reference to the enclosing class.
    // The list is only iterated downstream, so an immutable list is safe.
    return clientBuildersWithDirect(
        Collections.singletonList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}

/** Builders covering every consistency level at or below the account's configured level. */
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/** Parses the configured consistencies JSON array (UpperCamel names) into enum values; returns null when unset. */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
    try {
        List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        for (String consistency : consistencyStrings) {
            consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
        }
        return consistencyLevels;
    } catch (Exception e) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        // chain the parse failure so the root cause is not lost (was previously dropped)
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies, e);
    }
}
// Returns the consistency levels equal to or weaker than the given account-level
// consistency. The switch intentionally falls through: starting at the account's own
// level, every weaker level below it is appended as well
// (STRONG > BOUNDED_STALENESS > SESSION > CONSISTENT_PREFIX > EVENTUAL).
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
// falls through - include all weaker levels too
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
// falls through
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
// falls through
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
// falls through
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
// Builds the full matrix of direct-mode client builders -- one per (protocol, consistency)
// pair -- and appends a single SESSION-consistency gateway builder as a baseline.
// The result is shaped as Object[][] for use by TestNG data providers.
private static Object[][] clientBuildersWithDirect(
    List<ConsistencyLevel> testConsistencies,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    // Multi-master is only exercised when preferred locations are configured AND the
    // account consistency is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
    for (Protocol protocol : protocols) {
        testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
            protocol,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests)));
    }
    // Log the effective configuration of every builder for test diagnostics.
    cosmosConfigurations.forEach(c -> {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
        ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            consistencyLevel,
            extractConfigs(c).getProtocol()
        );
    });
    cosmosConfigurations.add(
        createGatewayRxDocumentClient(
            ConsistencyLevel.SESSION,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests));
    // Wrap each builder in its own single-element row.
    return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// Builds a gateway-mode SESSION-consistency client intended for suite setup/teardown
// housekeeping; the throttling-retry wait window is stretched to cover the whole
// suite-setup timeout so provisioning calls survive 429s.
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
    ThrottlingRetryOptions options = new ThrottlingRetryOptions();
    options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
    GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
    return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode(gatewayConnectionConfig)
        .throttlingRetryOptions(options)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(ConsistencyLevel.SESSION);
}
// Gateway-mode builder against the default test host; delegates to the endpoint overload.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    return createGatewayRxDocumentClient(
        TestConfigurations.HOST,
        consistencyLevel,
        multiMasterEnabled,
        preferredRegions,
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests);
}

// Full gateway-mode builder. The internal buildConnectionPolicy call materializes the
// connection policy eagerly so tests can inspect it before the client is built.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    String endpoint,
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
        .credential(credential)
        .gatewayMode(gatewayConnectionConfig)
        .multipleWriteRegionsEnabled(multiMasterEnabled)
        .preferredRegions(preferredRegions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    ImplementationBridgeHelpers
        .CosmosClientBuilderHelper
        .getCosmosClientBuilderAccessor()
        .buildConnectionPolicy(builder);
    if (!retryOnThrottledRequests) {
        // Disable throttling retries entirely (0 attempts) for tests asserting 429 behavior.
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    return builder;
}

// Convenience default: gateway mode, SESSION consistency, single region, retries on.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
    return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
// Builds a direct-connection-mode client builder for the default test host. The requested
// wire protocol is forced by injecting a Mockito-spied Configs whose getProtocol() is
// stubbed, since the protocol is not part of the public builder surface.
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
                                                                  Protocol protocol,
                                                                  boolean multiMasterEnabled,
                                                                  List<String> preferredRegions,
                                                                  boolean contentResponseOnWriteEnabled,
                                                                  boolean retryOnThrottledRequests) {
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .directMode(DirectConnectionConfig.getDefaultConfig())
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    if (preferredRegions != null) {
        builder.preferredRegions(preferredRegions);
    }
    // Multi-region writes are only enabled for SESSION consistency.
    if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
        builder.multipleWriteRegionsEnabled(true);
    }
    if (!retryOnThrottledRequests) {
        // Disable throttling retries entirely (0 attempts) for tests asserting 429 behavior.
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    Configs configs = spy(new Configs());
    doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
    return injectConfigs(builder, configs);
}
// Ceiling division of the expected result count by the page size; a query always yields
// at least one (possibly empty) page, so the result is floored at 1.
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    int fullPages = (totalExpectedResult + maxPageSize - 1) / maxPageSize;
    return fullPages < 1 ? 1 : fullPages;
}
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
return new Object[][]{
{true},
{false},
{null}
};
}
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
return new Object[][]{
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
};
}
// Deep-clones a builder so a test can tweak one permutation without mutating the shared one.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
    return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
// Decodes a hex string (two hex digits per byte, e.g. "0AFF") into its raw bytes.
// Odd-length input now fails with a descriptive IllegalArgumentException instead of
// surfacing as a StringIndexOutOfBoundsException from substring; non-hex characters
// still raise NumberFormatException from Integer.parseInt.
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        throw new IllegalArgumentException("hex string must have an even length, got " + string.length());
    }
    byte[] bytes = new byte[string.length() / 2];
    for (int i = 0; i < bytes.length; i++) {
        // Each pair of hex digits yields one byte; cast truncates values > 127 correctly.
        bytes[i] = (byte) Integer.parseInt(string.substring(2 * i, 2 * i + 2), 16);
    }
    return bytes;
}
} | public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() { | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
    // Empties a container in place -- deletes every document, trigger, stored procedure
    // and UDF -- which is cheaper than dropping and re-provisioning it between tests.
    CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
    String cosmosContainerId = cosmosContainerProperties.getId();
    logger.info("Truncating collection {} ...", cosmosContainerId);
    List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setMaxDegreeOfParallelism(-1);
    int maxItemCount = 100;
    logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
    cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
        .byPage(maxItemCount)
        .publishOn(Schedulers.parallel())
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .flatMap(doc -> {
            // Derive each document's partition key from the container's PK definition so
            // the point-delete can be routed; documents missing the PK value use PK NONE.
            PartitionKey partitionKey = null;
            Object propertyValue = null;
            if (paths != null && !paths.isEmpty()) {
                List<String> pkPath = PathParser.getPathParts(paths.get(0));
                propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
                if (propertyValue == null) {
                    partitionKey = PartitionKey.NONE;
                } else {
                    partitionKey = new PartitionKey(propertyValue);
                }
            } else {
                partitionKey = new PartitionKey(null);
            }
            return cosmosContainer.deleteItem(doc.getId(), partitionKey);
        }).then().block();
    logger.info("Truncating collection {} triggers ...", cosmosContainerId);
    cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
        .byPage(maxItemCount)
        .publishOn(Schedulers.parallel())
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .flatMap(trigger -> {
            return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
        }).then().block();
    logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
    cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
        .byPage(maxItemCount)
        .publishOn(Schedulers.parallel())
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .flatMap(storedProcedure -> {
            return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
        }).then().block();
    logger.info("Truncating collection {} udfs ...", cosmosContainerId);
    cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
        .byPage(maxItemCount)
        .publishOn(Schedulers.parallel())
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .flatMap(udf -> {
            return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
        }).then().block();
    logger.info("Finished truncating collection {}.", cosmosContainerId);
}
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
case EVENTUAL:
case CONSISTENT_PREFIX:
logger.info(" additional wait in EVENTUAL mode so the replica catch up");
try {
TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
} catch (Exception e) {
logger.error("unexpected failure", e);
}
case SESSION:
case BOUNDED_STALENESS:
case STRONG:
default:
break;
}
}
// Creates a container with explicit manual throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options, int throughput) {
    database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}

// Creates a container with the database's default throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options) {
    database.createContainer(cosmosContainerProperties, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}
// Builds a multi-partition container definition (partition key "/pk") carrying four
// composite indexes: a simple two-column index, a four-column ("max columns") index,
// one over primitive-valued fields (number/string/bool/null), and one over string
// fields of increasing length with the default sort order.
// NOTE(review): despite the method name, no spatial indexes are configured here --
// confirm whether that is intentional.
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";

    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    // Simple composite index: number ASC, string DESC.
    List<CompositePath> compositeIndexSimple = new ArrayList<>();
    compositeIndexSimple.add(compositePathOf("/" + NUMBER_FIELD, CompositePathSortOrder.ASCENDING));
    compositeIndexSimple.add(compositePathOf("/" + STRING_FIELD, CompositePathSortOrder.DESCENDING));

    // Maximum-width composite index across four columns with alternating sort orders.
    List<CompositePath> compositeIndexMaxColumns = new ArrayList<>();
    compositeIndexMaxColumns.add(compositePathOf("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    compositeIndexMaxColumns.add(compositePathOf("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING));
    compositeIndexMaxColumns.add(compositePathOf("/" + NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING));
    compositeIndexMaxColumns.add(compositePathOf("/" + STRING_FIELD_2, CompositePathSortOrder.ASCENDING));

    // Composite index over primitive-valued fields.
    List<CompositePath> compositeIndexPrimitiveValues = new ArrayList<>();
    compositeIndexPrimitiveValues.add(compositePathOf("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    compositeIndexPrimitiveValues.add(compositePathOf("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING));
    compositeIndexPrimitiveValues.add(compositePathOf("/" + BOOL_FIELD, CompositePathSortOrder.DESCENDING));
    compositeIndexPrimitiveValues.add(compositePathOf("/" + NULL_FIELD, CompositePathSortOrder.ASCENDING));

    // Composite index over strings of increasing length; sort order left at the default.
    List<CompositePath> compositeIndexLongStrings = new ArrayList<>();
    compositeIndexLongStrings.add(compositePathOf("/" + STRING_FIELD, null));
    compositeIndexLongStrings.add(compositePathOf("/" + SHORT_STRING_FIELD, null));
    compositeIndexLongStrings.add(compositePathOf("/" + MEDIUM_STRING_FIELD, null));
    compositeIndexLongStrings.add(compositePathOf("/" + LONG_STRING_FIELD, null));

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    compositeIndexes.add(compositeIndexSimple);
    compositeIndexes.add(compositeIndexMaxColumns);
    compositeIndexes.add(compositeIndexPrimitiveValues);
    compositeIndexes.add(compositeIndexLongStrings);

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

// Builds a CompositePath for the given path; a null order keeps the CompositePath's
// default sort order (matching the original paths that never called setOrder).
private static CompositePath compositePathOf(String path, CompositePathSortOrder order) {
    CompositePath compositePath = new CompositePath();
    compositePath.setPath(path);
    if (order != null) {
        compositePath.setOrder(order);
    }
    return compositePath;
}
// Creates a container in the named database and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase database = client.getDatabase(dbId);
    database.createContainer(collectionDefinition).block();
    return database.getContainer(collectionDefinition.getId());
}

// Deletes the named container; throws if it does not exist.
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    client.getDatabase(dbId).getContainer(collectionId).delete().block();
}

// Inserts a document and returns the server-side representation of the created item.
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    return BridgeInternal.getProperties(cosmosContainer.createItem(item).block());
}
// Issues one createItem call per document and merges them with the given concurrency
// cap; responses are emitted in completion order, not input order.
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                                  List<T> documentDefinitionList,
                                                  int concurrencyLevel) {
    List<Mono<CosmosItemResponse<T>>> result =
        new ArrayList<>(documentDefinitionList.size());
    for (T docDef : documentDefinitionList) {
        result.add(cosmosContainer.createItem(docDef));
    }
    return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}

// Blocking convenience wrapper: inserts all documents and returns the created items.
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(itemResponse -> itemResponse.getItem())
        .collectList()
        .block();
}

// Blocking insert that discards the responses (only the side effect matters).
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .then()
        .block();
}
// Creates a user in the named database and returns a handle to it.
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    CosmosUserResponse userResponse = database.createUser(userSettings).block();
    return database.getUser(userResponse.getProperties().getId());
}

// Creates a user, first removing any existing user with the same id.
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
    deleteUserIfExists(client, databaseId, user.getId());
    return createUser(client, databaseId, user);
}

// Creates a container, first removing any existing container with the same id.
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
    deleteCollectionIfExists(client, databaseId, collection.getId());
    return createCollection(client.getDatabase(databaseId), collection, options);
}
// Standard definition plus the all-versions-and-deletes (full fidelity) change feed
// policy with a 5-minute retention window.
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
    CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
    cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
    return cosmosContainerProperties;
}

// Standard "/mypk"-partitioned definition with a random id.
static protected CosmosContainerProperties getCollectionDefinition() {
    return getCollectionDefinition(UUID.randomUUID().toString());
}

// Standard "/mypk"-partitioned definition with the given id.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    ArrayList<String> paths = new ArrayList<>();
    paths.add("/mypk");
    partitionKeyDef.setPaths(paths);
    CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
    return collectionDefinition;
}

// Definition with a caller-supplied partition key definition.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}

// "/mypk"-partitioned definition using the V2 (large, hashed) partition key scheme.
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    ArrayList<String> paths = new ArrayList<>();
    paths.add("/mypk");
    partitionKeyDef.setPaths(paths);
    partitionKeyDef.setVersion(PartitionKeyDefinitionVersion.V2);
    CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
    return collectionDefinition;
}
// Range-indexed definition partitioned on "/id".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}

// Range-indexed definition partitioned on "/mypk".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}

// Random-id definition whose indexing policy includes every path ("/*").
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    partitionKeyDef.setPaths(partitionKeyPath);
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    List<IncludedPath> includedPaths = new ArrayList<>();
    IncludedPath includedPath = new IncludedPath("/*");
    includedPaths.add(includedPath);
    indexingPolicy.setIncludedPaths(includedPaths);
    CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}
// Deletes the named container only when a query confirms it exists; the database read
// validates the database itself is reachable first.
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    database.read().block();
    List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
        .collectList()
        .block();
    if (!res.isEmpty()) {
        deleteCollection(database, collectionId);
    }
}

// Deletes the named container; throws if it does not exist.
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
    cosmosDatabase.getContainer(collectionId).delete().block();
}

// Deletes the given container; throws if it does not exist.
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
    cosmosContainer.delete().block();
}
// Deletes the document only when a query confirms it exists.
// NOTE(review): the query is scoped with new PartitionKey(docId), i.e. it assumes the
// container's partition key value equals the document id -- confirm against callers.
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setPartitionKey(new PartitionKey(docId));
    CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
    List<InternalObjectNode> res = cosmosContainer
        .queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
        .byPage()
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .collectList().block();
    if (!res.isEmpty()) {
        deleteDocument(cosmosContainer, docId);
    }
}
// Best-effort document delete: a 404 (already gone) is ignored, any other CosmosException
// or unexpected failure is rethrown.
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer != null && documentId != null) {
        try {
            cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
        } catch (Exception e) {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null || dce.getStatusCode() != 404) {
                throw e;
            }
        }
    }
}

// Deletes a document that lives under the "none" partition key.
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
// Deletes the named user only when a query confirms it exists; the database read
// validates the database itself is reachable first.
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    client.getDatabase(databaseId).read().block();
    List<CosmosUserProperties> res = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!res.isEmpty()) {
        deleteUser(database, userId);
    }
}

// Deletes the named user; throws if it does not exist.
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
// Creates a database, first best-effort deleting any database with the same id.
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}

// Creates a database with the given id; throws if it already exists.
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
// Creates a database with the sync client; returns null when creation fails (e.g. the
// database already exists). The failure is now reported through the test logger with
// the full stack trace instead of e.printStackTrace(), so it lands in the test output.
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        logger.error("failed to create sync database {}", databaseId, e);
    }
    return null;
}
// Returns the existing database with the given id (verified via a read) or creates it
// when the existence query comes back empty.
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    if (!res.isEmpty()) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        // Read to validate the handle actually resolves before handing it back.
        database.read().block();
        return database;
    } else {
        CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
        client.createDatabase(databaseSettings).block();
        return client.getDatabase(databaseSettings.getId());
    }
}
// Best-effort database delete for teardown: any failure (typically 404) is deliberately
// swallowed so cleanup never fails the test run.
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // intentionally ignored -- teardown must not fail the test
        }
    }
}

// Best-effort sync database delete for teardown; failures are logged but never rethrown.
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database != null) {
        try {
            logger.info("attempting to delete database ....");
            database.delete();
            logger.info("database deletion completed");
        } catch (Exception e) {
            logger.error("failed to delete sync database", e);
        }
    }
}
// Deletes every container in the database, one at a time; null database is a no-op.
// Note: unlike the other safe* helpers, individual delete failures here DO propagate.
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database != null) {
        List<CosmosContainerProperties> collections = database.readAllContainers()
            .collectList()
            .block();
        for(CosmosContainerProperties collection: collections) {
            database.getContainer(collection.getId()).delete().block();
        }
    }
}
// Best-effort container delete for teardown: a 404 (already gone) is logged at info
// level, any other failure is logged as an error, and nothing is ever rethrown from
// the delete itself so cleanup can continue.
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection != null) {
        try {
            logger.info("attempting to delete container {}.{}....",
                collection.getDatabase().getId(),
                collection.getId());
            collection.delete().block();
            logger.info("Container {}.{} deletion completed",
                collection.getDatabase().getId(),
                collection.getId());
        } catch (Exception e) {
            boolean shouldLogAsError = true;
            if (e instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) e;
                if (cosmosException.getStatusCode() == 404) {
                    shouldLogAsError = false;
                    logger.info(
                        "Container {}.{} does not exist anymore.",
                        collection.getDatabase().getId(),
                        collection.getId());
                }
            }
            if (shouldLogAsError) {
                logger.error("failed to delete sync container {}.{}",
                    collection.getDatabase().getId(),
                    collection.getId(),
                    e);
            }
        }
        finally {
            // Brief pause after the delete attempt before the next teardown step.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before propagating so callers can still
                // observe the interruption (previously it was silently cleared).
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }
}
// Best-effort delete of a container by id; all failures are deliberately swallowed.
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database != null && collectionId != null) {
        try {
            safeDeleteCollection(database.getContainer(collectionId));
        } catch (Exception e) {
            // intentionally ignored -- teardown must not fail the test
        }
    }
}

// Closes the client on a freshly spawned thread so the caller never blocks; close
// failures are logged and dropped.
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client != null) {
        new Thread(() -> {
            try {
                client.close();
            } catch (Exception e) {
                logger.error("failed to close client", e);
            }
        }).start();
    }
}

// Closes the async client inline; close failures are logged and dropped.
static protected void safeClose(CosmosAsyncClient client) {
    if (client != null) {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}

// Closes the sync client inline; close failures are logged and dropped.
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client != null) {
        try {
            logger.info("closing client ...");
            client.close();
            logger.info("closing client completed");
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
validateSuccess(single, validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
validateSuccess(single.flux(), validator, timeout);
}
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
CosmosResponseValidator<T> validator, long timeout) {
TestSubscriber<T> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
throws InterruptedException {
validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
FailureValidator validator, long timeout) throws InterruptedException {
TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
Mono<T> responseMono, CosmosItemResponseValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
Mono<T> responseMono, FailureValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
// Asserts that a query completes cleanly and hands all emitted pages to the validator,
// using the default subscriber timeout.
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}

// Same as above with an explicit timeout (milliseconds).
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNoErrors();
    testSubscriber.assertComplete();
    validator.validate(testSubscriber.values());
}

// Runs the query once per page size, draining it via continuation tokens, and validates
// the collected pages for each run.
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        List<FeedResponse<T>> receivedDocuments = queryWithContinuationTokens(query, container, pageSize, classType);
        validator.validate(receivedDocuments);
    }
}
// Drains a query one page at a time, restarting the query from the previous page's
// continuation token on every iteration, and returns all pages received. Used to verify
// that query results survive continuation-token round-trips.
// (The previously accumulated continuation-token list was never read and has been removed.)
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    List<FeedResponse<T>> responseList = new ArrayList<>();
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        // Request exactly one page starting at the previous continuation point.
        queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
    } while (requestContinuation != null);
    return responseList;
}
// Asserts that a query terminates with exactly one error satisfying the validator,
// using the default subscriber timeout.
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
    validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}

// Same as above with an explicit timeout; getEvents().get(1) is the TestSubscriber
// error-event list, whose single entry is handed to the validator.
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
                                            FailureValidator validator, long timeout) {
    TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
    validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
// Data provider: single default builder (gateway, SESSION).
@DataProvider
public static Object[][] clientBuilders() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}

// Data provider: gateway-only builder (gateway, SESSION).
@DataProvider
public static Object[][] clientBuildersWithGateway() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}

// Data provider: SESSION consistency over both direct TCP and gateway transport.
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
        {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
    };
}

// Data provider: SESSION consistency over direct TCP only.
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
    };
}
/**
 * Translates an UpperCamel consistency name (e.g. "BoundedStaleness") into the matching
 * {@link ConsistencyLevel} constant.
 *
 * @throws IllegalStateException when {@code consistency} is null.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    // Guard clause: a null value is a misconfigured test environment.
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/**
 * Parses a JSON array of preferred region names (e.g. {@code ["East US","West US"]}).
 *
 * @return the parsed list, or null when the input is null/empty.
 * @throws IllegalStateException when the input is not valid JSON.
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    try {
        TypeReference<List<String>> listOfStrings = new TypeReference<List<String>>() {};
        return objectMapper.readValue(preferredLocations, listOfStrings);
    } catch (Exception error) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
    }
}
/**
 * Parses a JSON array of UpperCamel protocol names (e.g. {@code ["Tcp","Https"]}) into
 * {@link Protocol} constants.
 *
 * @return the parsed list, or null when the input is null/empty.
 * @throws IllegalStateException when the JSON is invalid or a name does not match a Protocol.
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    try {
        List<String> protocolNames =
            objectMapper.readValue(protocols, new TypeReference<List<String>>() {});
        List<Protocol> parsed = new ArrayList<>(protocolNames.size());
        for (String protocolName : protocolNames) {
            String constantName = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocolName);
            parsed.add(Protocol.valueOf(constantName));
        }
        return parsed;
    } catch (Exception error) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        throw new IllegalStateException("INVALID configured test protocols " + protocols);
    }
}
// TestNG data provider: direct clients over every configured protocol, plus a gateway client.
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
// TestNG data provider: direct HTTPS client plus a gateway client.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
// TestNG data provider: direct TCP client plus a gateway client.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
// TestNG data provider: direct TCP client only (includeGateway=false).
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
// TestNG data provider: direct TCP client with content-response-on-write DISABLED.
// BUG FIX: the second argument is contentResponseOnWriteEnabled; it was passed as
// 'true', contradicting this provider's name (compare the correctly wired
// clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled). Now passes 'false'.
// NOTE(review): includeGateway stays 'false' as before — confirm that is intended.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return simpleClientBuildersWithDirect(false, false, true, Protocol.TCP);
}
// TestNG data provider: direct-TCP and gateway SESSION clients with throttling retries
// disabled (last argument retryOnThrottledRequests=false).
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
return new Object[][]{
{ createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
{ createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
};
}
// Overload defaulting includeGateway=true and retryOnThrottledRequests=true.
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
// Builds TestNG parameters: one direct-mode EVENTUAL client per requested protocol and,
// optionally, one gateway SESSION client. Each builder becomes a single-element Object[].
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
// Simple providers only exercise EVENTUAL consistency.
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
// Multi-master only when preferred locations are configured and the account is SESSION.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
// Log the effective configuration of every direct builder for test diagnostics.
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// TestNG data provider: direct clients across all desired consistencies and configured protocols.
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
// TestNG data provider: all-consistency direct clients over HTTPS only.
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
// TestNG data provider: all-consistency direct clients over TCP only.
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
// TestNG data provider: all-consistency direct TCP clients with content response on write disabled.
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
// TestNG data provider: the union of direct-TCP SESSION builders with content response
// on write disabled and enabled, so tests run under both settings.
// Improvement: the two manual index-tracking copy loops are replaced with
// System.arraycopy, which is the standard idiom for array concatenation.
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] disabledContentResponse = clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] enabledContentResponse = clientBuildersWithDirectSession(true, true, Protocol.TCP);
    Object[][] combined =
        new Object[disabledContentResponse.length + enabledContentResponse.length][];
    System.arraycopy(disabledContentResponse, 0, combined, 0, disabledContentResponse.length);
    System.arraycopy(
        enabledContentResponse, 0, combined,
        disabledContentResponse.length, enabledContentResponse.length);
    return combined;
}
// TestNG data provider: SESSION-consistency direct clients over all configured protocols.
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
// TestNG data provider: the direct-session builders plus one extra gateway builder
// pointed at the compute-gateway emulator port instead of the routing gateway port.
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
// Same host, but with the routing gateway port swapped for the compute gateway port.
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
// TestNG data provider: SESSION-consistency clients over direct TCP (plus the gateway builder
// always appended by clientBuildersWithDirect).
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
// TestNG data provider: gateway SESSION client only — passing no protocols means the
// direct-mode loop in clientBuildersWithDirect produces nothing, leaving just the
// gateway builder it always appends.
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
// Converts a protocol list to an array.
// Improvement: uses the zero-length-array idiom (new Protocol[0]) recommended for
// Collection.toArray — on modern JVMs it is at least as fast as presizing and simpler.
static Protocol[] toArray(List<Protocol> protocols) {
    return protocols.toArray(new Protocol[0]);
}
// Builds direct + gateway client builders restricted to SESSION consistency.
// Improvement: replaces double-brace initialization (an anonymous-subclass-of-ArrayList
// anti-pattern that leaks an extra class and an enclosing-instance reference) with
// Collections.singletonList; clientBuildersWithDirect only iterates the list.
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    return clientBuildersWithDirect(
        Collections.singletonList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}
// Builds direct + gateway builders for every consistency level in 'desiredConsistencies'.
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses a JSON array of UpperCamel consistency names (e.g. {@code ["Session","Eventual"]})
 * into {@link ConsistencyLevel} constants.
 *
 * @return the parsed list, or null when the input is null/empty.
 * @throws IllegalStateException when the JSON is invalid or a name does not match a level.
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    try {
        List<String> consistencyNames =
            objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        List<ConsistencyLevel> parsed = new ArrayList<>(consistencyNames.size());
        for (String consistencyName : consistencyNames) {
            String constantName =
                CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistencyName);
            parsed.add(ConsistencyLevel.valueOf(constantName));
        }
        return parsed;
    } catch (Exception error) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
    }
}
// Returns the given consistency level plus every weaker level, in strongest-to-weakest
// order. The switch deliberately falls through each case so that e.g. STRONG collects
// all five levels — hence the @SuppressWarnings("fallthrough").
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
// intentional fallthrough
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
// intentional fallthrough
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
// intentional fallthrough
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
// intentional fallthrough
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
// Builds TestNG parameters: one direct-mode builder per (consistency, protocol) pair,
// plus one gateway SESSION builder that is always appended.
private static Object[][] clientBuildersWithDirect(
List<ConsistencyLevel> testConsistencies,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
// Multi-master only when preferred locations are configured and the account is SESSION.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
// Log the effective configuration of every direct builder for test diagnostics.
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
// Unlike simpleClientBuildersWithDirect, the gateway builder is added unconditionally.
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// Gateway SESSION builder for suite setup/teardown housekeeping; allows throttling
// retries to wait up to the whole suite-setup timeout.
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
ThrottlingRetryOptions options = new ThrottlingRetryOptions();
options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.throttlingRetryOptions(options)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(ConsistencyLevel.SESSION);
}
// Overload defaulting the endpoint to TestConfigurations.HOST.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
return createGatewayRxDocumentClient(
TestConfigurations.HOST,
consistencyLevel,
multiMasterEnabled,
preferredRegions,
contentResponseOnWriteEnabled,
retryOnThrottledRequests);
}
// Builds a gateway-mode client builder against the given endpoint. The connection policy
// is materialized eagerly via the ImplementationBridgeHelpers accessor; throttling retries
// can be turned off by zeroing the max retry attempts.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
String endpoint,
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.multipleWriteRegionsEnabled(multiMasterEnabled)
.preferredRegions(preferredRegions)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
ImplementationBridgeHelpers
.CosmosClientBuilderHelper
.getCosmosClientBuilderAccessor()
.buildConnectionPolicy(builder);
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
return builder;
}
// No-arg overload: gateway SESSION client with all default options.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
// Builds a direct-mode client builder. The requested wire protocol is forced via a
// Mockito spy on Configs whose getProtocol() is stubbed, then injected into the builder.
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
Protocol protocol,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.directMode(DirectConnectionConfig.getDefaultConfig())
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
if (preferredRegions != null) {
builder.preferredRegions(preferredRegions);
}
// Multi-master writes only make sense here under SESSION consistency.
if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
builder.multipleWriteRegionsEnabled(true);
}
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
Configs configs = spy(new Configs());
doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
return injectConfigs(builder, configs);
}
/**
 * Computes how many pages a query returning {@code totalExpectedResult} items produces
 * with the given page size: ceiling division, but never fewer than one page (an empty
 * result still yields a single empty page).
 */
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    int ceilingPages = (totalExpectedResult + maxPageSize - 1) / maxPageSize;
    return ceilingPages < 1 ? 1 : ceilingPages;
}
// TestNG data provider: the three query-metrics flag states (enabled, disabled, unset).
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
return new Object[][]{
{true},
{false},
{null}
};
}
// TestNG data provider: DISTINCT ORDER BY queries paired with a boolean flag
// (second element) that differs per query; its semantics are defined by the consuming test.
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
return new Object[][]{
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
};
}
// Clones a client builder via the internal bridge so tests can tweak copies independently.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
/**
 * Decodes a hexadecimal string (two hex digits per byte, e.g. "ff00") into raw bytes.
 *
 * Improvement: an odd-length input previously crashed with an opaque
 * StringIndexOutOfBoundsException from substring; it now fails fast with a descriptive
 * IllegalArgumentException. Non-hex characters still surface as NumberFormatException,
 * as before.
 *
 * @param string the hex string; must have even length.
 * @return the decoded bytes.
 * @throws IllegalArgumentException when the length is odd.
 */
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        throw new IllegalArgumentException(
            "hex string must have an even number of characters, got length " + string.length());
    }
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    for (int i = 0; i < string.length(); i += 2) {
        // Each pair of characters encodes one unsigned byte value (0-255).
        int b = Integer.parseInt(string.substring(i, i + 2), 16);
        outputStream.write(b);
    }
    return outputStream.toByteArray();
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} |
updated | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
// Empties a container for reuse between tests: deletes every document, trigger,
// stored procedure, and UDF, blocking until each category completes.
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
// -1 lets the SDK pick the degree of parallelism for the cross-partition query.
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
// Re-derive each document's partition key value from the container's PK path,
// falling back to PartitionKey.NONE when the document has no value at that path.
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
// For the weak consistency levels (EVENTUAL / CONSISTENT_PREFIX) sleeps briefly so
// replicas can catch up before the test asserts on reads; stronger levels need no wait.
// The EVENTUAL case deliberately falls through from CONSISTENT_PREFIX.
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
case EVENTUAL:
case CONSISTENT_PREFIX:
logger.info(" additional wait in EVENTUAL mode so the replica catch up");
try {
TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
} catch (Exception e) {
logger.error("unexpected failure", e);
}
// intentional fallthrough to the no-op cases below
case SESSION:
case BOUNDED_STALENESS:
case STRONG:
default:
break;
}
}
// Creates a container with explicit manual throughput and returns a handle to it (blocking).
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options, int throughput) {
database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
// Creates a container with default throughput and returns a handle to it (blocking).
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options) {
database.createContainer(cosmosContainerProperties, options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
/**
 * Builds a container definition partitioned on "/pk" with four composite indexes:
 * a simple two-column index, a four-column index, an index over primitive-valued
 * fields, and an index over strings of various lengths (default sort order).
 *
 * Improvements: the fourteen hand-rolled CompositePath blocks are collapsed into a
 * small helper, and the unused OBJECT_FIELD/ARRAY_FIELD constants were removed.
 * NOTE(review): despite the method name, no spatial indexes are configured here —
 * confirm whether that is intentional or a gap.
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";

    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    // Simple two-column composite index.
    ArrayList<CompositePath> compositeIndexSimple = new ArrayList<>(Arrays.asList(
        makeCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath("/" + STRING_FIELD, CompositePathSortOrder.DESCENDING)));

    // Composite index at the maximum column count exercised by the tests.
    ArrayList<CompositePath> compositeIndexMaxColumns = new ArrayList<>(Arrays.asList(
        makeCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath("/" + NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING),
        makeCompositePath("/" + STRING_FIELD_2, CompositePathSortOrder.ASCENDING)));

    // Composite index over number/string/bool/null-valued fields.
    ArrayList<CompositePath> compositeIndexPrimitiveValues = new ArrayList<>(Arrays.asList(
        makeCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath("/" + BOOL_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath("/" + NULL_FIELD, CompositePathSortOrder.ASCENDING)));

    // Composite index over strings of different lengths; sort order left at the default.
    ArrayList<CompositePath> compositeIndexLongStrings = new ArrayList<>(Arrays.asList(
        makeCompositePath("/" + STRING_FIELD),
        makeCompositePath("/" + SHORT_STRING_FIELD),
        makeCompositePath("/" + MEDIUM_STRING_FIELD),
        makeCompositePath("/" + LONG_STRING_FIELD)));

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    compositeIndexes.add(compositeIndexSimple);
    compositeIndexes.add(compositeIndexMaxColumns);
    compositeIndexes.add(compositeIndexPrimitiveValues);
    compositeIndexes.add(compositeIndexLongStrings);

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

// Creates a CompositePath for 'path' with an explicit sort order.
private static CompositePath makeCompositePath(String path, CompositePathSortOrder order) {
    CompositePath compositePath = makeCompositePath(path);
    compositePath.setOrder(order);
    return compositePath;
}

// Creates a CompositePath for 'path', leaving the sort order at the SDK default.
private static CompositePath makeCompositePath(String path) {
    CompositePath compositePath = new CompositePath();
    compositePath.setPath(path);
    return compositePath;
}
// Creates a container in the named database and returns a handle to it (blocking).
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
CosmosAsyncDatabase database = client.getDatabase(dbId);
database.createContainer(collectionDefinition).block();
return database.getContainer(collectionDefinition.getId());
}
// Deletes the named container (blocking); fails if it does not exist.
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
client.getDatabase(dbId).getContainer(collectionId).delete().block();
}
// Inserts an item and returns its server-side representation (blocking).
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
return BridgeInternal.getProperties(cosmosContainer.createItem(item).block());
}
// Inserts all documents concurrently, merging up to 'concurrencyLevel' create calls at a
// time. Note: Flux.merge does not preserve input order in the emitted responses.
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList,
int concurrencyLevel) {
List<Mono<CosmosItemResponse<T>>> result =
new ArrayList<>(documentDefinitionList.size());
for (T docDef : documentDefinitionList) {
result.add(cosmosContainer.createItem(docDef));
}
return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}
// Blocking variant of bulkInsert at the default concurrency level: unwraps each
// response into its created item and collects the results into a list.
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
// Blocking bulk insert that discards the responses (used when only side effects matter).
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
.publishOn(Schedulers.parallel())
.then()
.block();
}
// Creates a database user and returns a handle to it (blocking).
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
CosmosUserResponse userResponse = database.createUser(userSettings).block();
return database.getUser(userResponse.getProperties().getId());
}
// Idempotent user creation: removes any pre-existing user with the same id first.
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
deleteUserIfExists(client, databaseId, user.getId());
return createUser(client, databaseId, user);
}
// Idempotent container creation: removes any pre-existing container with the same id first.
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
deleteCollectionIfExists(client, databaseId, collection.getId());
return createCollection(client.getDatabase(databaseId), collection, options);
}
// Default container definition plus an all-versions-and-deletes (full fidelity)
// change feed policy with a 5-minute retention window.
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
return cosmosContainerProperties;
}
// Default container definition with a random id, partitioned on "/mypk".
static protected CosmosContainerProperties getCollectionDefinition() {
return getCollectionDefinition(UUID.randomUUID().toString());
}
// Container definition with the given id, partitioned on the single path "/mypk".
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    // Mutable copy so the SDK may adjust the path list if it needs to.
    partitionKeyDefinition.setPaths(new ArrayList<>(Collections.singletonList("/mypk")));
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Container definition with a caller-supplied partition key definition.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Container definition partitioned on "/mypk" using partition key definition version V2.
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
partitionKeyDef.setVersion(PartitionKeyDefinitionVersion.V2);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
// Range-indexed container definition partitioned on "/id".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
// Range-indexed container definition partitioned on "/mypk".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
// Random-id container definition with the given partition key path(s) and an indexing
// policy that includes every path ("/*").
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
partitionKeyDef.setPaths(partitionKeyPath);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
return cosmosContainerProperties;
}
// Deletes the named container only when a query for its id finds it (blocking).
// The id is interpolated into the query string; acceptable here because test code
// controls the ids.
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
database.read().block();
List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
.collectList()
.block();
if (!res.isEmpty()) {
deleteCollection(database, collectionId);
}
}
// Deletes the named container in the given database (blocking).
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
cosmosDatabase.getContainer(collectionId).delete().block();
}
// Deletes the given container (blocking).
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
cosmosContainer.delete().block();
}
// Deletes the document with the given id only when a query finds it (blocking).
// NOTE(review): the query scopes to partition key == docId, but the delegated
// deleteDocument() deletes with PartitionKey.NONE — these only agree for containers
// where documents live in the NONE partition or pk == id; confirm intended usage.
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(docId));
CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
List<InternalObjectNode> res = cosmosContainer
.queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
.byPage()
.flatMap(page -> Flux.fromIterable(page.getResults()))
.collectList().block();
if (!res.isEmpty()) {
deleteDocument(cosmosContainer, docId);
}
}
// Best-effort document delete: tolerates a missing document (HTTP 404) but rethrows
// any other failure. No-op when container or id is null.
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
if (cosmosContainer != null && documentId != null) {
try {
cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
} catch (Exception e) {
CosmosException dce = Utils.as(e, CosmosException.class);
// 404 (not found) is acceptable; anything else is a real failure.
if (dce == null || dce.getStatusCode() != 404) {
throw e;
}
}
}
}
// Deletes the document by id using PartitionKey.NONE (blocking).
// NOTE(review): only valid for documents stored without a partition key value —
// callers with partitioned documents should use safeDeleteDocument instead; confirm.
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
// Deletes the named user only when a query for its id finds it (blocking).
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
client.getDatabase(databaseId).read().block();
List<CosmosUserProperties> res = database
.queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
.collectList().block();
if (!res.isEmpty()) {
deleteUser(database, userId);
}
}
// Deletes the named user (blocking).
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
database.getUser(userId).delete().block();
}
/** Drops any existing database with the same id, then recreates it and returns a handle. */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}

/** Creates a database with the given id (blocking) and returns a handle to it. */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties settings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(settings).block();
    return client.getDatabase(settings.getId());
}
/**
 * Creates a database with the given id using the sync client.
 *
 * @return a handle to the created database, or {@code null} when creation fails
 *         (e.g. the database already exists) — callers must handle null.
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Route the failure through the class logger (with the exception as cause)
        // instead of printStackTrace, so it shows up in the test logs.
        logger.error("failed to create sync database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns a handle to the database with the given id, creating it first when
 * the account does not already contain it.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    String query = String.format("SELECT * FROM r where r.id = '%s'", databaseId);
    List<CosmosDatabaseProperties> existing = client.queryDatabases(query, null)
        .collectList()
        .block();
    if (existing.isEmpty()) {
        CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
        client.createDatabase(databaseSettings).block();
        return client.getDatabase(databaseSettings.getId());
    }
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    database.read().block();
    return database;
}
/**
 * Best-effort database deletion for test teardown: any failure (e.g. the
 * database is already gone) is logged and swallowed so cleanup can continue.
 */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // Intentionally best-effort, but log (instead of the previous silent
            // swallow) so unexpected teardown failures remain diagnosable.
            logger.warn("failed to delete database {}", database.getId(), e);
        }
    }
}
/** Best-effort sync database deletion; failures are logged and swallowed. */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database == null) {
        return;
    }
    try {
        logger.info("attempting to delete database ....");
        database.delete();
        logger.info("database deletion completed");
    } catch (Exception e) {
        logger.error("failed to delete sync database", e);
    }
}
/** Deletes every container in the given database, one at a time (blocking). */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> collections = database.readAllContainers()
        .collectList()
        .block();
    collections.forEach(properties -> database.getContainer(properties.getId()).delete().block());
}
/**
 * Best-effort container deletion. A 404 (container already gone) is logged at
 * info; any other failure is logged as an error and swallowed. Always sleeps
 * 100ms afterwards before returning.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection != null) {
        try {
            logger.info("attempting to delete container {}.{}....",
                collection.getDatabase().getId(),
                collection.getId());
            collection.delete().block();
            logger.info("Container {}.{} deletion completed",
                collection.getDatabase().getId(),
                collection.getId());
        } catch (Exception e) {
            boolean shouldLogAsError = true;
            if (e instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) e;
                if (cosmosException.getStatusCode() == 404) {
                    shouldLogAsError = false;
                    logger.info(
                        "Container {}.{} does not exist anymore.",
                        collection.getDatabase().getId(),
                        collection.getId());
                }
            }
            if (shouldLogAsError) {
                logger.error("failed to delete sync container {}.{}",
                    collection.getDatabase().getId(),
                    collection.getId(),
                    e);
            }
        }
        finally {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before propagating — the previous
                // version dropped it, hiding the interruption from callers.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }
}
/** Best-effort deletion of a container addressed by database + id; never throws. */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database == null || collectionId == null) {
        return;
    }
    try {
        safeDeleteCollection(database.getContainer(collectionId));
    } catch (Exception e) {
        // deliberately swallowed: teardown must continue
    }
}
/** Closes the client on a freshly spawned thread so the caller never blocks. */
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    Runnable closeTask = () -> {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    };
    new Thread(closeTask).start();
}

/** Closes the client on the calling thread, logging (and swallowing) failures. */
static protected void safeClose(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    try {
        client.close();
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}

/** Closes the sync client, logging progress and swallowing any failure. */
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client == null) {
        return;
    }
    try {
        logger.info("closing client ...");
        client.close();
        logger.info("closing client completed");
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
// Asserts the Mono emits exactly one successful response satisfying the
// validator, using the default subscriber timeout.
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    validateSuccess(single, validator, subscriberValidationTimeout);
}
// Same as above with an explicit timeout (milliseconds); delegates to the Flux overload.
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}
/**
 * Subscribes to the flux and asserts it completes successfully with exactly one
 * value within the timeout, then runs the validator on that value.
 */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
                                                              CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    T response = subscriber.values().get(0);
    validator.validate(response);
}
/** Asserts the Mono terminates with exactly one error satisfying the validator. */
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    Flux<U> asFlux = mono.flux();
    validateFailure(asFlux, validator, subscriberValidationTimeout);
}

/**
 * Subscribes to the flux and asserts it terminates with exactly one error within
 * the timeout, then runs the validator on that error.
 */
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
                                                                                  FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    Throwable failure = (Throwable) subscriber.getEvents().get(1).get(0);
    validator.validate(failure);
}
/** Asserts the item-response Mono succeeds with exactly one value and validates it. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}

/** Asserts the item-response Mono fails with exactly one error and validates it. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    Throwable failure = (Throwable) subscriber.getEvents().get(1).get(0);
    validator.validate(failure);
}
/** Validates a query flux with the default subscriber timeout. */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}

/**
 * Subscribes to the paged query flux, asserts it completes without error within
 * the timeout, and validates the full list of received pages.
 */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}
/**
 * Drains the query page-by-page via continuation tokens, once per requested
 * page size, validating the pages gathered for each size.
 */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        validator.validate(queryWithContinuationTokens(query, container, pageSize, classType));
    }
}
/**
 * Executes the query one page at a time, re-issuing it with the previous page's
 * continuation token until the token is null, and returns all pages in order.
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    // NOTE: the previous version also accumulated tokens into a list that was
    // never read; that dead local has been removed.
    List<FeedResponse<T>> responseList = new ArrayList<>();
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        // Resume from the last token; only the first emitted page is consumed.
        queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
    } while (requestContinuation != null);
    return responseList;
}
/** Validates that a query flux fails, using the default subscriber timeout. */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
    validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}

/**
 * Subscribes to the query flux and asserts it terminates with exactly one error
 * within the timeout, then validates that error.
 */
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
                                            FailureValidator validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.getEvents().get(1)).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
// Default builder matrix: a single gateway-mode, SESSION-consistency client.
@DataProvider
public static Object[][] clientBuilders() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
// Gateway-only matrix (currently identical to clientBuilders()).
@DataProvider
public static Object[][] clientBuildersWithGateway() {
    return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
// SESSION-consistency clients over both direct TCP and gateway transports.
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
        {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
    };
}
// SESSION-consistency client over direct TCP only (no gateway variant).
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
    return new Object[][]{
        {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
    };
}
/**
 * Parses an UpperCamel consistency name (e.g. "BoundedStaleness") into the enum.
 *
 * @throws IllegalStateException when the configured value is null.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/**
 * Parses a JSON array of preferred-location names; returns null for blank input.
 *
 * @throws IllegalStateException when the JSON cannot be parsed.
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    TypeReference<List<String>> listOfStrings = new TypeReference<List<String>>() {
    };
    try {
        return objectMapper.readValue(preferredLocations, listOfStrings);
    } catch (Exception e) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
    }
}
/**
 * Parses a JSON array of UpperCamel protocol names into Protocol values;
 * returns null for blank input.
 *
 * @throws IllegalStateException when parsing fails or a name is unknown.
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    try {
        List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        List<Protocol> parsed = new ArrayList<>(protocolStrings.size());
        for (String protocolName : protocolStrings) {
            parsed.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocolName)));
        }
        return parsed;
    } catch (Exception e) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        throw new IllegalStateException("INVALID configured test protocols " + protocols);
    }
}
// Direct-mode builders over all configured protocols, plus a gateway variant.
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
    return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
// As above, restricted to HTTPS.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
// As above, restricted to TCP.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
// Direct TCP only — no gateway variant (includeGateway = false).
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
// Direct TCP builders with contentResponseOnWrite DISABLED. The previous body
// passed contentResponseOnWriteEnabled=true (apparently copy/pasted from
// simpleClientBuildersWithJustDirectTcp), which contradicted this method's name.
// NOTE(review): includeGateway=false is kept from the original — confirm whether
// a gateway builder should also be included here.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return simpleClientBuildersWithDirect(false, false, true, Protocol.TCP);
}
// Direct-TCP and gateway SESSION builders with throttled-request retries
// disabled (the final 'false' argument).
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() {
    return new Object[][]{
        { createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
        { createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
    };
}
// Convenience overload: gateway included and throttled-request retries enabled.
private static Object[][] simpleClientBuildersWithDirect(
    boolean contentResponseOnWriteEnabled,
    Protocol... protocols) {
    return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
// Builds the "simple" direct-mode matrix: one direct client per protocol at
// EVENTUAL consistency, optionally followed by a gateway SESSION client.
private static Object[][] simpleClientBuildersWithDirect(
    boolean includeGateway,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    // The "simple" matrix is fixed to EVENTUAL regardless of the account's level.
    List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
    // Multi-master only applies when preferred locations are configured and the
    // account consistency is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
    for (Protocol protocol : protocols) {
        testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
            consistencyLevel,
            protocol,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests)));
    }
    // Log what every configuration will use (side effect only).
    cosmosConfigurations.forEach(c -> {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
        ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            consistencyLevel,
            extractConfigs(c).getProtocol()
        );
    });
    if (includeGateway) {
        cosmosConfigurations.add(
            createGatewayRxDocumentClient(
                ConsistencyLevel.SESSION,
                false,
                null,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
    }
    // Wrap each builder as a single-element row of the data-provider matrix.
    return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// Direct-mode builders across all desired consistencies over every configured
// protocol, plus a gateway variant.
@DataProvider
public static Object[][] clientBuildersWithDirect() {
    return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
// As above, restricted to HTTPS.
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
// As above, restricted to TCP.
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
    return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
// TCP variant with contentResponseOnWrite disabled (first argument).
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
/**
 * Combines direct-session builders with contentResponseOnWrite disabled and
 * enabled into a single data-provider matrix (disabled rows first).
 */
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] disabled = clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] enabled = clientBuildersWithDirectSession(true, true, Protocol.TCP);
    // Concatenate the two provider arrays (replaces the previous index-juggling
    // manual copy loops).
    Object[][] clientBuilders = new Object[disabled.length + enabled.length][];
    System.arraycopy(disabled, 0, clientBuilders, 0, disabled.length);
    System.arraycopy(enabled, 0, clientBuilders, disabled.length, enabled.length);
    return clientBuilders;
}
// Direct-mode SESSION builders over all configured protocols, plus gateway.
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
    return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
/**
 * Direct-session builders plus one extra gateway builder that targets the
 * compute-gateway emulator port instead of the routing gateway port.
 */
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
    Object[][] directProviders = clientBuildersWithDirectSession(
        true,
        true,
        toArray(protocols));
    List<Object[]> providers = new ArrayList<>(Arrays.asList(directProviders));
    CosmosClientBuilder computeGatewayBuilder = createGatewayRxDocumentClient(
        TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
        ConsistencyLevel.SESSION,
        false,
        null,
        true,
        true);
    providers.add(new Object[] { computeGatewayBuilder });
    return providers.toArray(new Object[providers.size()][]);
}
// Direct-mode SESSION builders over TCP, plus gateway.
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
    return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
// No protocols are passed here, so the direct-mode loop never executes and only
// the gateway SESSION builder is produced — despite delegating to a
// "DirectSession" helper. NOTE(review): appears intentional but fragile;
// confirm the name matches the intent.
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
    return clientBuildersWithDirectSession(true, true);
}
/** Converts the protocol list into an array. */
static Protocol[] toArray(List<Protocol> protocols) {
    // Zero-sized-array idiom: the JVM allocates a correctly sized array
    // internally; preferred over pre-sizing with size().
    return protocols.toArray(new Protocol[0]);
}
/**
 * Direct-mode builders restricted to SESSION consistency. Uses Arrays.asList
 * instead of the previous double-brace initialization, which created a needless
 * anonymous ArrayList subclass holding a reference to the enclosing scope.
 */
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    return clientBuildersWithDirect(
        Arrays.asList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}
// Direct-mode builders across all desired consistency levels, plus gateway.
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    logger.info("Max test consistency to use is [{}]", accountConsistency);
    return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses a JSON array of UpperCamel consistency names; returns null for blank input.
 *
 * @throws IllegalStateException when parsing fails or a name is unknown.
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    try {
        List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        List<ConsistencyLevel> parsed = new ArrayList<>(consistencyStrings.size());
        for (String name : consistencyStrings) {
            parsed.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, name)));
        }
        return parsed;
    } catch (Exception e) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
    }
}
// Returns the given consistency level plus every weaker level. Relies on
// deliberate switch fall-through: STRONG collects all five levels, EVENTUAL
// collects only itself.
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
    List<ConsistencyLevel> testConsistencies = new ArrayList<>();
    switch (accountConsistency) {
        // Intentional fall-through on every case below until EVENTUAL.
        case STRONG:
            testConsistencies.add(ConsistencyLevel.STRONG);
        case BOUNDED_STALENESS:
            testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
        case SESSION:
            testConsistencies.add(ConsistencyLevel.SESSION);
        case CONSISTENT_PREFIX:
            testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
        case EVENTUAL:
            testConsistencies.add(ConsistencyLevel.EVENTUAL);
            break;
        default:
            throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
    }
    return testConsistencies;
}
// Builds the direct-mode matrix: one direct client per (protocol, consistency)
// pair, always followed by one gateway SESSION client.
private static Object[][] clientBuildersWithDirect(
    List<ConsistencyLevel> testConsistencies,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    // Multi-master only applies when preferred locations are configured and the
    // account consistency is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
    for (Protocol protocol : protocols) {
        testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
            protocol,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests)));
    }
    // Log the configuration of every builder (side effect only).
    cosmosConfigurations.forEach(c -> {
        ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
        ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            connectionPolicy.getConnectionMode(),
            consistencyLevel,
            extractConfigs(c).getProtocol()
        );
    });
    // Unlike the "simple" variant, the gateway builder is always appended here.
    cosmosConfigurations.add(
        createGatewayRxDocumentClient(
            ConsistencyLevel.SESSION,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests));
    return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/**
 * Gateway-mode builder used for suite setup/teardown housekeeping, with a
 * throttling retry wait long enough to cover the suite-setup timeout.
 */
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
    ThrottlingRetryOptions throttlingRetryOptions = new ThrottlingRetryOptions();
    throttlingRetryOptions.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
    return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode(new GatewayConnectionConfig())
        .throttlingRetryOptions(throttlingRetryOptions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(ConsistencyLevel.SESSION);
}
// Gateway builder against the default test host.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    return createGatewayRxDocumentClient(
        TestConfigurations.HOST,
        consistencyLevel,
        multiMasterEnabled,
        preferredRegions,
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests);
}
// Gateway builder against an explicit endpoint. Throttled retries can be
// switched off by zeroing the max attempts.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    String endpoint,
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
        .credential(credential)
        .gatewayMode(gatewayConnectionConfig)
        .multipleWriteRegionsEnabled(multiMasterEnabled)
        .preferredRegions(preferredRegions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    // NOTE(review): invoked purely for its side effect on the builder —
    // presumably to materialize the connection policy eagerly; confirm.
    ImplementationBridgeHelpers
        .CosmosClientBuilderHelper
        .getCosmosClientBuilderAccessor()
        .buildConnectionPolicy(builder);
    if (!retryOnThrottledRequests) {
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    return builder;
}
// Default gateway builder: SESSION consistency, single-master, retries enabled.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
    return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
// Direct-mode builder. Because the public builder API does not expose the wire
// protocol, a Mockito spy of Configs with getProtocol() stubbed to the requested
// protocol is injected into the builder.
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
                                                                  Protocol protocol,
                                                                  boolean multiMasterEnabled,
                                                                  List<String> preferredRegions,
                                                                  boolean contentResponseOnWriteEnabled,
                                                                  boolean retryOnThrottledRequests) {
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .directMode(DirectConnectionConfig.getDefaultConfig())
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    if (preferredRegions != null) {
        builder.preferredRegions(preferredRegions);
    }
    // Multi-master is only honored together with SESSION consistency.
    if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
        builder.multipleWriteRegionsEnabled(true);
    }
    if (!retryOnThrottledRequests) {
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    Configs configs = spy(new Configs());
    doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
    return injectConfigs(builder, configs);
}
/**
 * Ceiling division of totalExpectedResult by maxPageSize, clamped to at least
 * one page (an empty result set still produces a single empty page).
 */
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    int fullAndPartialPages = (totalExpectedResult + maxPageSize - 1) / maxPageSize;
    return fullAndPartialPages < 1 ? 1 : fullAndPartialPages;
}
// Query-metrics toggle matrix: enabled, disabled, and unset (null).
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
    return new Object[][]{
        {true},
        {false},
        {null}
    };
}
// ORDER BY test queries. NOTE(review): the boolean presumably flags whether the
// ORDER BY key matches the DISTINCT projection (c.id vs c._ts) — confirm against
// the consuming test.
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
    return new Object[][]{
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
    };
}
// Returns an independent copy of the builder so tests can mutate it safely.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
    return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
/**
 * Decodes a hex string (two hex digits per byte, e.g. "0AFF") into bytes.
 *
 * @throws IllegalArgumentException if the string has odd length.
 * @throws NumberFormatException if a character is not a hex digit.
 */
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        // Previously an odd-length input failed deep inside substring() with an
        // opaque StringIndexOutOfBoundsException; fail fast with a clear message.
        throw new IllegalArgumentException("hex string must have even length, got " + string.length());
    }
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    for (int i = 0; i < string.length(); i += 2) {
        int b = Integer.parseInt(string.substring(i, i + 2), 16);
        outputStream.write(b);
    }
    return outputStream.toByteArray();
}
} | public static Object[][] simpleClientBuildersWithDirectTcpWithoutRetryOnThrottledRequests() { | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
case EVENTUAL:
case CONSISTENT_PREFIX:
logger.info(" additional wait in EVENTUAL mode so the replica catch up");
try {
TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
} catch (Exception e) {
logger.error("unexpected failure", e);
}
case SESSION:
case BOUNDED_STALENESS:
case STRONG:
default:
break;
}
}
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options, int throughput) {
database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options) {
database.createContainer(cosmosContainerProperties, options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
final String NUMBER_FIELD = "numberField";
final String STRING_FIELD = "stringField";
final String NUMBER_FIELD_2 = "numberField2";
final String STRING_FIELD_2 = "stringField2";
final String BOOL_FIELD = "boolField";
final String NULL_FIELD = "nullField";
final String OBJECT_FIELD = "objectField";
final String ARRAY_FIELD = "arrayField";
final String SHORT_STRING_FIELD = "shortStringField";
final String MEDIUM_STRING_FIELD = "mediumStringField";
final String LONG_STRING_FIELD = "longStringField";
final String PARTITION_KEY = "pk";
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
ArrayList<String> partitionKeyPaths = new ArrayList<String>();
partitionKeyPaths.add("/" + PARTITION_KEY);
partitionKeyDefinition.setPaths(partitionKeyPaths);
CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<List<CompositePath>> compositeIndexes = new ArrayList<>();
ArrayList<CompositePath> compositeIndexSimple = new ArrayList<CompositePath>();
CompositePath compositePath1 = new CompositePath();
compositePath1.setPath("/" + NUMBER_FIELD);
compositePath1.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath2 = new CompositePath();
compositePath2.setPath("/" + STRING_FIELD);
compositePath2.setOrder(CompositePathSortOrder.DESCENDING);
compositeIndexSimple.add(compositePath1);
compositeIndexSimple.add(compositePath2);
ArrayList<CompositePath> compositeIndexMaxColumns = new ArrayList<CompositePath>();
CompositePath compositePath3 = new CompositePath();
compositePath3.setPath("/" + NUMBER_FIELD);
compositePath3.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath4 = new CompositePath();
compositePath4.setPath("/" + STRING_FIELD);
compositePath4.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath5 = new CompositePath();
compositePath5.setPath("/" + NUMBER_FIELD_2);
compositePath5.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath6 = new CompositePath();
compositePath6.setPath("/" + STRING_FIELD_2);
compositePath6.setOrder(CompositePathSortOrder.ASCENDING);
compositeIndexMaxColumns.add(compositePath3);
compositeIndexMaxColumns.add(compositePath4);
compositeIndexMaxColumns.add(compositePath5);
compositeIndexMaxColumns.add(compositePath6);
ArrayList<CompositePath> compositeIndexPrimitiveValues = new ArrayList<CompositePath>();
CompositePath compositePath7 = new CompositePath();
compositePath7.setPath("/" + NUMBER_FIELD);
compositePath7.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath8 = new CompositePath();
compositePath8.setPath("/" + STRING_FIELD);
compositePath8.setOrder(CompositePathSortOrder.ASCENDING);
CompositePath compositePath9 = new CompositePath();
compositePath9.setPath("/" + BOOL_FIELD);
compositePath9.setOrder(CompositePathSortOrder.DESCENDING);
CompositePath compositePath10 = new CompositePath();
compositePath10.setPath("/" + NULL_FIELD);
compositePath10.setOrder(CompositePathSortOrder.ASCENDING);
compositeIndexPrimitiveValues.add(compositePath7);
compositeIndexPrimitiveValues.add(compositePath8);
compositeIndexPrimitiveValues.add(compositePath9);
compositeIndexPrimitiveValues.add(compositePath10);
ArrayList<CompositePath> compositeIndexLongStrings = new ArrayList<CompositePath>();
CompositePath compositePath11 = new CompositePath();
compositePath11.setPath("/" + STRING_FIELD);
CompositePath compositePath12 = new CompositePath();
compositePath12.setPath("/" + SHORT_STRING_FIELD);
CompositePath compositePath13 = new CompositePath();
compositePath13.setPath("/" + MEDIUM_STRING_FIELD);
CompositePath compositePath14 = new CompositePath();
compositePath14.setPath("/" + LONG_STRING_FIELD);
compositeIndexLongStrings.add(compositePath11);
compositeIndexLongStrings.add(compositePath12);
compositeIndexLongStrings.add(compositePath13);
compositeIndexLongStrings.add(compositePath14);
compositeIndexes.add(compositeIndexSimple);
compositeIndexes.add(compositeIndexMaxColumns);
compositeIndexes.add(compositeIndexPrimitiveValues);
compositeIndexes.add(compositeIndexLongStrings);
indexingPolicy.setCompositeIndexes(compositeIndexes);
cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
return cosmosContainerProperties;
}
/** Creates a container in database {@code dbId} (blocking) and returns a handle to it. */
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
CosmosAsyncDatabase database = client.getDatabase(dbId);
database.createContainer(collectionDefinition).block();
return database.getContainer(collectionDefinition.getId());
}
/** Deletes the container {@code collectionId} in database {@code dbId} (blocking); fails if it does not exist. */
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
client.getDatabase(dbId).getContainer(collectionId).delete().block();
}
/** Creates {@code item} in the container (blocking) and returns the service-side properties of the created item. */
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
return BridgeInternal.getProperties(cosmosContainer.createItem(item).block());
}
/**
 * Inserts all documents concurrently (up to {@code concurrencyLevel} in flight at once).
 * Note: Flux.merge gives no ordering guarantee between responses.
 */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList,
int concurrencyLevel) {
List<Mono<CosmosItemResponse<T>>> result =
new ArrayList<>(documentDefinitionList.size());
for (T docDef : documentDefinitionList) {
result.add(cosmosContainer.createItem(docDef));
}
return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}
/** Blocking variant of {@code bulkInsert}: returns the created items (order not guaranteed). */
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList) {
return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
.publishOn(Schedulers.parallel())
.map(itemResponse -> itemResponse.getItem())
.collectList()
.block();
}
/** Blocking bulk insert that discards the responses; useful when only the side effect matters. */
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
.publishOn(Schedulers.parallel())
.then()
.block();
}
/** Creates a user in the database (blocking) and returns a handle to it. */
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
CosmosUserResponse userResponse = database.createUser(userSettings).block();
return database.getUser(userResponse.getProperties().getId());
}
/** Deletes any pre-existing user with the same id first, then creates {@code user}. */
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
deleteUserIfExists(client, databaseId, user.getId());
return createUser(client, databaseId, user);
}
/** Deletes any pre-existing container with the same id first, then creates {@code collection} with {@code options}. */
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
deleteCollectionIfExists(client, databaseId, collection.getId());
return createCollection(client.getDatabase(databaseId), collection, options);
}
/** Container definition with the all-versions-and-deletes (full fidelity) change feed policy, 5 minute retention. */
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
return cosmosContainerProperties;
}
/** Container definition partitioned on {@code /mypk} with a random id. */
static protected CosmosContainerProperties getCollectionDefinition() {
return getCollectionDefinition(UUID.randomUUID().toString());
}
/** Builds a container definition with the given id, partitioned on {@code /mypk}. */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/** Container definition with a caller-supplied partition key definition. */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/** Builds a container definition partitioned on {@code /mypk} using partition-key definition version V2 (hash v2). */
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/** Range-indexed container definition partitioned on {@code /id}. */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
/** Range-indexed container definition partitioned on {@code /mypk}. */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
/** Container definition (random id) with an indexing policy that includes all paths ({@code /*}). */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
partitionKeyDef.setPaths(partitionKeyPath);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
return cosmosContainerProperties;
}
/** Deletes the container only if it exists; queries by id first so a missing container is not an error. */
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
// Fails fast (throws) if the database itself does not exist.
database.read().block();
List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
.collectList()
.block();
if (!res.isEmpty()) {
deleteCollection(database, collectionId);
}
}
/** Deletes the named container in {@code cosmosDatabase} (blocking). */
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
cosmosDatabase.getContainer(collectionId).delete().block();
}
/** Deletes the given container (blocking). */
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
cosmosContainer.delete().block();
}
/**
 * Deletes document {@code docId} only if it exists.
 * NOTE(review): the partition key is set to the document id, so this assumes
 * the test documents use id as their partition-key value — confirm at call sites.
 */
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(docId));
CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
List<InternalObjectNode> res = cosmosContainer
.queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
.byPage()
.flatMap(page -> Flux.fromIterable(page.getResults()))
.collectList().block();
if (!res.isEmpty()) {
// deleteDocument uses PartitionKey.NONE — see that helper.
deleteDocument(cosmosContainer, docId);
}
}
/** Deletes a document, tolerating 404 (already gone); any other failure is rethrown. */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
if (cosmosContainer != null && documentId != null) {
try {
cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey)).block();
} catch (Exception e) {
CosmosException dce = Utils.as(e, CosmosException.class);
// 404 means the document is already deleted — that is fine for cleanup.
if (dce == null || dce.getStatusCode() != 404) {
throw e;
}
}
}
}
/** Deletes the document using {@link PartitionKey#NONE}; only valid for items stored without a partition-key value. */
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
cosmosContainer.deleteItem(documentId, PartitionKey.NONE).block();
}
/**
 * Deletes user {@code userId} only if it exists; queries by id first so a missing user is not an error.
 * Throws if the database itself does not exist (the read() fails fast).
 */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Reuse the handle obtained above instead of resolving the database a second time.
    database.read().block();
    List<CosmosUserProperties> res = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!res.isEmpty()) {
        deleteUser(database, userId);
    }
}
/** Deletes the named user (blocking); fails if it does not exist. */
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
database.getUser(userId).delete().block();
}
/** Deletes any pre-existing database with the same id first, then creates it (blocking). */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
client.createDatabase(databaseSettings).block();
return client.getDatabase(databaseSettings.getId());
}
/** Creates a database (blocking) and returns a handle to it; fails if it already exists. */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
client.createDatabase(databaseSettings).block();
return client.getDatabase(databaseSettings.getId());
}
/**
 * Creates a database with the sync client and returns a handle to it.
 *
 * @return the database handle, or {@code null} if creation failed with a
 *         {@link CosmosException} (e.g. the database already exists)
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Use the class logger instead of printStackTrace() so the failure
        // appears in the test logs like every other error in this file.
        logger.error("failed to create sync database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns a handle to database {@code databaseId}, creating it first if it does not exist.
 * Existence is checked with an id query; an existing database is also read to verify access.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    // isEmpty() is the idiomatic emptiness check (was: res.size() != 0).
    if (!res.isEmpty()) {
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        return database;
    } else {
        CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
        client.createDatabase(databaseSettings).block();
        return client.getDatabase(databaseSettings.getId());
    }
}
/** Best-effort database deletion for test teardown; failures are logged, never rethrown. */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // Previously swallowed silently; log at info so cleanup problems
            // are visible without failing teardown (matches the other safe* helpers).
            logger.info("failed to delete database {}", database.getId(), e);
        }
    }
}
/** Best-effort sync database deletion for teardown; failures are logged, never rethrown. */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
if (database != null) {
try {
logger.info("attempting to delete database ....");
database.delete();
logger.info("database deletion completed");
} catch (Exception e) {
logger.error("failed to delete sync database", e);
}
}
}
/** Deletes every container in {@code database} (blocking); no-op when {@code database} is null. */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
if (database != null) {
List<CosmosContainerProperties> collections = database.readAllContainers()
.collectList()
.block();
for(CosmosContainerProperties collection: collections) {
database.getContainer(collection.getId()).delete().block();
}
}
}
/**
 * Best-effort container deletion for teardown. A 404 (container already gone)
 * is logged at info; any other failure is logged at error but never rethrown.
 * Always sleeps 100ms afterwards — presumably to let the service settle
 * between teardown steps; TODO confirm the reason for the delay.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
if (collection != null) {
try {
logger.info("attempting to delete container {}.{}....",
collection.getDatabase().getId(),
collection.getId());
collection.delete().block();
logger.info("Container {}.{} deletion completed",
collection.getDatabase().getId(),
collection.getId());
} catch (Exception e) {
boolean shouldLogAsError = true;
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
if (cosmosException.getStatusCode() == 404) {
// Container already deleted — not an error for cleanup.
shouldLogAsError = false;
logger.info(
"Container {}.{} does not exist anymore.",
collection.getDatabase().getId(),
collection.getId());
}
}
if (shouldLogAsError) {
logger.error("failed to delete sync container {}.{}",
collection.getDatabase().getId(),
collection.getId(),
e);
}
}
finally {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
}
/** Best-effort deletion of the named container; all failures are deliberately ignored. */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
if (database != null && collectionId != null) {
try {
safeDeleteCollection(database.getContainer(collectionId));
} catch (Exception e) {
// Intentionally swallowed: cleanup must not fail the test.
}
}
}
/** Closes the client on a fire-and-forget background thread so the caller is never blocked by a slow close. */
static protected void safeCloseAsync(CosmosAsyncClient client) {
if (client != null) {
new Thread(() -> {
try {
client.close();
} catch (Exception e) {
logger.error("failed to close client", e);
}
}).start();
}
}
/** Closes the async client in place; failures are logged, never rethrown. */
static protected void safeClose(CosmosAsyncClient client) {
if (client != null) {
try {
client.close();
} catch (Exception e) {
logger.error("failed to close client", e);
}
}
}
/** Closes the sync client; failures are logged, never rethrown. */
static protected void safeCloseSyncClient(CosmosClient client) {
if (client != null) {
try {
logger.info("closing client ...");
client.close();
logger.info("closing client completed");
} catch (Exception e) {
logger.error("failed to close client", e);
}
}
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
validateSuccess(single, validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
validateSuccess(single.flux(), validator, timeout);
}
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
CosmosResponseValidator<T> validator, long timeout) {
TestSubscriber<T> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
throws InterruptedException {
validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
FailureValidator validator, long timeout) throws InterruptedException {
TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
Mono<T> responseMono, CosmosItemResponseValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
Mono<T> responseMono, FailureValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/** Validates a successful query feed using the default timeout. */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator) {
validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
/** Collects all feed pages within {@code timeout} ms, asserts completion without error, and validates the page list. */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
validator.validate(testSubscriber.values());
}
/** Runs the query once per page size, driving it via continuation tokens, and validates each run's pages. */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
String query,
CosmosAsyncContainer container,
int[] pageSizes,
FeedResponseListValidator<T> validator,
Class<T> classType) {
for (int pageSize : pageSizes) {
List<FeedResponse<T>> receivedDocuments = queryWithContinuationTokens(query, container, pageSize, classType);
validator.validate(receivedDocuments);
}
}
/**
 * Drains a query one page at a time, re-issuing it with the previous page's
 * continuation token until the token is null. Returns the pages in order.
 * NOTE(review): {@code continuationTokens} is collected but never read — presumably
 * kept for debugging; confirm before removing.
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
String query,
CosmosAsyncContainer container,
int pageSize,
Class<T> classType) {
String requestContinuation = null;
List<String> continuationTokens = new ArrayList<String>();
List<FeedResponse<T>> responseList = new ArrayList<>();
do {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
// Only the first page of each subscription is consumed; the token drives the next request.
queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
@SuppressWarnings("unchecked")
FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
requestContinuation = firstPage.getContinuationToken();
responseList.add(firstPage);
continuationTokens.add(requestContinuation);
} while (requestContinuation != null);
return responseList;
}
/** Validates a failing query feed using the default timeout. */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}
/** Asserts the query feed terminates with exactly one error within {@code timeout} ms and validates it. */
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
FailureValidator validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/** TestNG data provider: single gateway-mode client builder with SESSION consistency. */
@DataProvider
public static Object[][] clientBuilders() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
/** TestNG data provider: gateway-only builder, SESSION consistency (same shape as {@code clientBuilders}). */
@DataProvider
public static Object[][] clientBuildersWithGateway() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
/** TestNG data provider: one direct (TCP) and one gateway builder, both SESSION consistency. */
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
};
}
/** TestNG data provider: direct (TCP) builder only, SESSION consistency. */
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
};
}
/**
 * Parses an UpperCamel consistency name (e.g. "BoundedStaleness") into a {@link ConsistencyLevel}.
 * Throws IllegalStateException when {@code consistency} is null; an unknown name
 * will surface as {@code valueOf}'s IllegalArgumentException instead.
 */
static ConsistencyLevel parseConsistency(String consistency) {
if (consistency != null) {
consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
return ConsistencyLevel.valueOf(consistency);
}
logger.error("INVALID configured test consistency [{}].", consistency);
throw new IllegalStateException("INVALID configured test consistency " + consistency);
}
/**
 * Parses a JSON array of preferred region names, e.g. {@code ["West US","East US"]}.
 *
 * @return the parsed list, or {@code null} when the input is empty/absent
 * @throws IllegalStateException if the JSON is malformed
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    try {
        return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
        });
    } catch (Exception e) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        // Chain the original parse error so the root cause is not lost.
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations, e);
    }
}
/**
 * Parses a JSON array of UpperCamel protocol names (e.g. {@code ["Tcp","Https"]}) into {@link Protocol}s.
 *
 * @return the parsed list, or {@code null} when the input is empty/absent
 * @throws IllegalStateException if the JSON is malformed or a name is unknown
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    List<Protocol> protocolList = new ArrayList<>();
    try {
        List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        for(String protocol : protocolStrings) {
            protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
        }
        return protocolList;
    } catch (Exception e) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        // Chain the original error so the root cause is not lost.
        throw new IllegalStateException("INVALID configured test protocols " + protocols, e);
    }
}
/** TestNG data provider: gateway + direct builders over the configured protocols, EVENTUAL consistency. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
/** TestNG data provider: gateway + direct-HTTPS builders. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
/** TestNG data provider: gateway + direct-TCP builders. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
/** TestNG data provider: direct-TCP builders only (includeGateway=false). */
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
/** TestNG data provider: direct-TCP builders with contentResponseOnWrite disabled. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    // Args are (includeGateway, contentResponseOnWriteEnabled, retryOnThrottledRequests).
    // The previous call passed contentResponseOnWriteEnabled=true, contradicting this
    // provider's name (compare clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled).
    return simpleClientBuildersWithDirect(false, false, true, Protocol.TCP);
}
/** TestNG data provider: direct + gateway builders with throttling retries disabled (0 retry attempts). */
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
return new Object[][]{
{ createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
{ createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
};
}
/** Convenience overload: always include the gateway builder and keep throttling retries on. */
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
/**
 * Builds the "simple" provider matrix: one direct builder per protocol at EVENTUAL
 * consistency, plus (optionally) one gateway builder at SESSION consistency.
 * Returns the builders wrapped for TestNG as Object[][].
 */
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
// Multi-master only makes sense with preferred regions and SESSION account consistency.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/** TestNG data provider: direct builders across all desired consistencies plus gateway, over configured protocols. */
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
/** TestNG data provider: all-consistency matrix over HTTPS. */
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
/** TestNG data provider: all-consistency matrix over TCP. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
/** TestNG data provider: all-consistency TCP matrix with contentResponseOnWrite disabled. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
/**
 * TestNG data provider: concatenation of the SESSION/TCP builders with
 * contentResponseOnWrite disabled followed by those with it enabled.
 */
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] clientBuildersWithDisabledContentResponseOnWrite =
        clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] clientBuildersWithEnabledContentResponseOnWrite =
        clientBuildersWithDirectSession(true, true, Protocol.TCP);
    int length = clientBuildersWithDisabledContentResponseOnWrite.length
        + clientBuildersWithEnabledContentResponseOnWrite.length;
    Object[][] clientBuilders = new Object[length][];
    // System.arraycopy replaces the previous hand-rolled index-copy loops.
    System.arraycopy(clientBuildersWithDisabledContentResponseOnWrite, 0,
        clientBuilders, 0, clientBuildersWithDisabledContentResponseOnWrite.length);
    System.arraycopy(clientBuildersWithEnabledContentResponseOnWrite, 0,
        clientBuilders, clientBuildersWithDisabledContentResponseOnWrite.length,
        clientBuildersWithEnabledContentResponseOnWrite.length);
    return clientBuilders;
}
/** TestNG data provider: SESSION-consistency matrix over the configured protocols plus gateway. */
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
/**
 * TestNG data provider: the SESSION matrix plus one extra gateway builder pointed at the
 * compute-gateway emulator port (host rewritten from the routing-gateway port).
 */
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
/** TestNG data provider: SESSION-consistency matrix over TCP plus gateway. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
/**
 * TestNG data provider: with no protocols passed, the direct loop produces nothing,
 * so this yields only the gateway SESSION builder — which matches the provider's name.
 */
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
/** Converts the protocol list to an array. */
static Protocol[] toArray(List<Protocol> protocols) {
    // Zero-length array is the idiomatic form for Collection.toArray(T[]);
    // the runtime allocates a correctly-sized array either way.
    return protocols.toArray(new Protocol[0]);
}
/** Builds the provider matrix restricted to SESSION consistency. */
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    // Collections.singletonList replaces the double-brace-initialized ArrayList
    // (an anonymous subclass that needlessly captures the enclosing scope).
    return clientBuildersWithDirect(
        Collections.singletonList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}
/** Builds the provider matrix over every consistency in {@code desiredConsistencies}. */
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses a JSON array of UpperCamel consistency names into {@link ConsistencyLevel}s.
 *
 * @return the parsed list, or {@code null} when the input is empty/absent
 * @throws IllegalStateException if the JSON is malformed or a name is unknown
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
    try {
        List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        for(String consistency : consistencyStrings) {
            consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
        }
        return consistencyLevels;
    } catch (Exception e) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        // Chain the original error so the root cause is not lost.
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies, e);
    }
}
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
/**
 * Core provider matrix: one direct builder per (protocol x consistency) combination,
 * always followed by one gateway SESSION builder. Logs each builder's effective
 * connection mode / consistency / protocol for test diagnostics.
 */
private static Object[][] clientBuildersWithDirect(
List<ConsistencyLevel> testConsistencies,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
// Multi-master only makes sense with preferred regions and SESSION account consistency.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/**
 * Gateway builder for suite setup/teardown housekeeping: SESSION consistency and a
 * generous throttling-retry wait so cleanup survives 429s during suite setup.
 */
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
ThrottlingRetryOptions options = new ThrottlingRetryOptions();
options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.throttlingRetryOptions(options)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(ConsistencyLevel.SESSION);
}
/** Gateway builder against the default test host; see the endpoint-taking overload for details. */
static protected CosmosClientBuilder createGatewayRxDocumentClient(
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
return createGatewayRxDocumentClient(
TestConfigurations.HOST,
consistencyLevel,
multiMasterEnabled,
preferredRegions,
contentResponseOnWriteEnabled,
retryOnThrottledRequests);
}
/**
 * Builds a gateway-mode client builder against {@code endpoint}. The connection policy is
 * pre-built via the internal accessor; throttling retries can be turned off entirely
 * ({@code retryOnThrottledRequests=false} sets 0 retry attempts).
 */
static protected CosmosClientBuilder createGatewayRxDocumentClient(
String endpoint,
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.multipleWriteRegionsEnabled(multiMasterEnabled)
.preferredRegions(preferredRegions)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
ImplementationBridgeHelpers
.CosmosClientBuilderHelper
.getCosmosClientBuilderAccessor()
.buildConnectionPolicy(builder);
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
return builder;
}
/** Default gateway builder: SESSION consistency, single-master, retries enabled. */
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
/**
 * Builds a direct-mode client builder. The requested wire protocol is injected by
 * spying on {@link Configs} (there is no public builder API for it). Multi-master
 * is only honored together with SESSION consistency.
 */
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
Protocol protocol,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.directMode(DirectConnectionConfig.getDefaultConfig())
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
if (preferredRegions != null) {
builder.preferredRegions(preferredRegions);
}
if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
builder.multipleWriteRegionsEnabled(true);
}
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
Configs configs = spy(new Configs());
doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
return injectConfigs(builder, configs);
}
/** Ceiling division of results by page size, with a minimum of one page (an empty result set still yields one page). */
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1);
}
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
return new Object[][]{
{true},
{false},
{null}
};
}
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
return new Object[][]{
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
};
}
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
public byte[] decodeHexString(String string) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (int i = 0; i < string.length(); i+=2) {
int b = Integer.parseInt(string.substring(i, i + 2), 16);
outputStream.write(b);
}
return outputStream.toByteArray();
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} |
NIT: For code clean, we might be able to not repeat the similar code section. ``` // for each invalid transcription format of (TEXT, SRT, VTT) TranscriptionFormat transcriptionFormat = ...; translationOptions.setResponseFormat(transcriptionFormat); assertThrows(IllegalArgumentException.class, () -> { client.getAudioTranslation(deploymentName, translationOptions, fileName); }); | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
});
} | }); | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, fileName, translationOptions);
});
}
});
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscription(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(deploymentName, translationOptions, fileName);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlainWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionSimpleJson(transcription, BATMAN_TRANSCRIPTION);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionVerboseJson(transcription, BATMAN_TRANSCRIPTION, AudioTaskLabel.TRANSCRIBE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertEquals(BATMAN_TRANSCRIPTION + "\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscriptionText(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscription(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription translation = client.getAudioTranslation(deploymentName, fileName, translationOptions);
assertAudioTranscriptionSimpleJson(translation, "It's raining today.");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription translation = client.getAudioTranslation(deploymentName, fileName, translationOptions);
assertAudioTranscriptionVerboseJson(translation, "It's raining today.", AudioTaskLabel.TRANSLATE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, fileName, translationOptions);
});
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} |
Not entirely sure how I would be able to achieve that. What would you suggest? | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
});
} | }); | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, fileName, translationOptions);
});
}
});
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
// Verifies the position-dependent shape of content-filter data in a streamed chat
// completion: message 0 carries prompt filter results, message 1 carries the
// assistant role delta with empty filter results, the final message carries the
// "stop" finish reason with empty filter results, and every message in between
// carries per-delta "safe" filter results.
// NOTE(review): messageList.stream() is consumed to count messages and the same
// IterableStream is then iterated again — IterableStream is single-traversal in
// some azure-core versions; verify this double consumption is safe here.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
// First chunk: prompt-level filter results only.
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
// Second chunk: role announcement; choice filter results are empty.
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
// Last chunk: finish reason "stop", no content, empty filter results.
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
// Middle chunks: content deltas annotated as safe.
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
// Verifies content-filter annotations on a non-streaming (legacy) completion:
// prompt filter results and the first choice's filter results must report "safe".
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
// Large token budget so the completion terminates with "stop", not "length".
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
// Verifies the position-dependent shape of content-filter data in a streamed
// completion: chunk 0 carries prompt filter results, the final chunk carries the
// "stop" finish reason with empty filter results, and intermediate chunks carry
// "safe" per-choice filter results.
// NOTE(review): resultCompletions.stream() is consumed twice (length check and
// count) before iterating — confirm the IterableStream supports re-traversal.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
// First chunk: prompt-level filter results only.
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
// Last chunk: finish reason "stop" and empty filter results.
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
// Middle chunks: text deltas annotated as safe.
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
// Verifies a chat completion grounded on an Azure Cognitive Search index via the
// "bring your own data" extension mechanism.
// NOTE(review): the endpoint literal below ("https:) appears truncated by
// extraction — confirm the full search-service URL against the repository copy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
// Streaming variant of the Azure Cognitive Search extension test: the streamed
// chunks are validated by assertChatCompletionsStreamingCognitiveSearch.
// NOTE(review): the endpoint literal below ("https:) appears truncated by
// extraction — confirm the full search-service URL against the repository copy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
// Defect fix: the original body built AudioTranscriptionOptions but never invoked
// the client nor asserted anything, so this test could never fail. Completed to
// mirror the sibling audio tests in this file (e.g. testGetAudioTranslationJson):
// set a response format, call the transcription API, and assert a non-null result.
// NOTE(review): argument order (deploymentName, options, fileName) follows the
// sibling getAudioTranslation calls in this chunk — confirm against the client API.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscription(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, transcriptionOptions, fileName);
assertNotNull(transcription);
});
}
// Verifies audio translation with the JSON response format returns a transcription.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
// Verifies audio translation with the VERBOSE_JSON response format.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
// Verifies audio translation with the plain-text response format, pinning the
// exact expected transcript (trailing newline included).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(deploymentName, translationOptions, fileName);
assertEquals("It's raining today.\n", transcription);
});
}
// Verifies that the text-returning translation overload rejects structured
// (JSON / VERBOSE_JSON) response formats with IllegalArgumentException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlainWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
// Client under test; (re)built per test from the parameterized HttpClient/version.
private OpenAIClient client;
// Builds a synchronous OpenAIClient, substituting the recorded playback transport
// when the interceptor manager is in playback mode.
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
// Happy-path completions call: one choice expected.
// NOTE(review): @MethodSource values in this file appear truncated by extraction —
// confirm against the repository copy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
// Streamed completions: expects more than one chunk, each valid per
// assertCompletionsStream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
// NOTE(review): stream() is consumed here and forEach re-consumes the
// IterableStream — verify it supports re-traversal in this azure-core version.
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
// Convenience overload: completions from a bare prompt (no CompletionsOptions).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
// Protocol-method variant: raw BinaryData request/response, expecting HTTP 200 and
// a deserializable Completions payload.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
// Negative path: an unknown deployment id must surface as ResourceNotFoundException
// with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
// Runner-provided deployment id is deliberately ignored and replaced.
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
// Verifies the token-usage accounting: total tokens positive and equal to
// completion tokens + prompt tokens; choice count equals n * number of prompts.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
// Verifies that a tiny maxTokens budget (3) makes the completion finish with
// reason "length" rather than "stop".
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
// Happy-path chat completions call: one choice expected.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
// Streamed chat completions: expects more than one chunk, each valid per
// assertChatCompletionsStream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
// NOTE(review): stream() then forEach re-consumes the IterableStream — verify
// re-traversal is supported in this azure-core version.
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
// Protocol-method variant of chat completions: raw BinaryData request/response,
// expecting HTTP 200 and a deserializable ChatCompletions payload.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
// Happy-path embeddings call.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
// Protocol-method variant of embeddings: raw BinaryData request/response,
// expecting HTTP 200 and a deserializable Embeddings payload.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
// Happy-path image generation call.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
// Verifies FunctionCallConfig.AUTO: the model is expected to choose to call
// "MyFunction" and produce arguments deserializable as MyFunctionCallArguments
// with the expected location and unit.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
// NOTE: expected/actual argument order here is reversed relative to JUnit
// convention (assertEquals(expected, actual)); behavior is unchanged.
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
// Verifies FunctionCallConfig.NONE: the service must reply with a plain assistant
// message finishing with reason "stop" instead of calling the function.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
// Verifies that forcing a call to an undeclared function name is rejected with
// HTTP 400 and a descriptive error message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
// "NotMyFunction" is deliberately not one of the declared functions.
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
// Verifies content-filter annotations on a non-streaming chat completion.
// Note: the parameterized serviceVersion is deliberately overridden with
// V2023_08_01_PREVIEW — presumably because content filtering requires that
// preview API version; confirm before changing.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
// Verifies the position-dependent content-filter shape of a streamed chat
// completion (see inline branch comments). The parameterized serviceVersion is
// deliberately overridden with V2023_08_01_PREVIEW — presumably required for
// content filtering; confirm before changing.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW)
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
// First chunk: prompt-level filter results only.
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
// Second chunk: assistant role announcement with empty filter results.
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
// Last chunk: finish reason "stop", no content, empty filter results.
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
// Middle chunks: content deltas annotated as safe.
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
// Verifies content-filter annotations on a non-streaming completion. The
// parameterized serviceVersion is deliberately overridden with
// V2023_08_01_PREVIEW — presumably required for content filtering; confirm.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
// Large token budget so the completion terminates with "stop", not "length".
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
// Verifies the position-dependent content-filter shape of a streamed completion:
// chunk 0 carries prompt filter results, the final chunk the "stop" finish reason
// with empty filter results, intermediate chunks safe per-choice results.
// NOTE(review): uses the generic getCompletionsRunner rather than the
// content-filter runner — presumably intentional; verify the deployment used.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
// First chunk: prompt-level filter results only.
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
// Last chunk: finish reason "stop" and empty filter results.
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
// Middle chunks: text deltas annotated as safe.
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
// Verifies a chat completion grounded on an Azure Cognitive Search index ("bring
// your own data"). serviceVersion is deliberately overridden with
// V2023_08_01_PREVIEW — presumably required for data-source extensions; confirm.
// NOTE(review): the endpoint literal ("https:) appears truncated by extraction.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
// Streaming variant of the Azure Cognitive Search extension test. serviceVersion
// is deliberately overridden with V2023_08_01_PREVIEW — presumably required for
// data-source extensions; confirm.
// NOTE(review): the endpoint literal ("https:) appears truncated by extraction.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
// Verifies audio transcription with the JSON response format against the known
// Batman transcript fixture.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionSimpleJson(transcription, BATMAN_TRANSCRIPTION);
});
}
// Verifies audio transcription with the VERBOSE_JSON response format, including
// the TRANSCRIBE task label.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionVerboseJson(transcription, BATMAN_TRANSCRIPTION, AudioTaskLabel.TRANSCRIBE);
});
}
// Verifies plain-text transcription output, pinning the exact transcript plus the
// trailing newline the service appends.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertEquals(BATMAN_TRANSCRIPTION + "\n", transcription);
});
}
// Verifies SRT subtitle output: cue index, SRT-style comma-separated timestamp,
// and expected content are all present.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
// Verifies WebVTT output: "WEBVTT" header, VTT-style dot-separated timestamp, and
// expected content are all present.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
// Verifies that the text-returning transcription overload rejects structured
// formats (JSON, VERBOSE_JSON) with IllegalArgumentException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscriptionText(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
// Verifies that the object-returning transcription overload rejects text-only
// formats (TEXT, SRT, VTT) with IllegalArgumentException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscription(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    getAudioTranslationRunner((deployment, audioFile) -> {
        // Translate the sample audio and verify the simple-JSON response shape.
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        options.setResponseFormat(AudioTranscriptionFormat.JSON);
        AudioTranscription result = client.getAudioTranslation(deployment, audioFile, options);
        assertAudioTranscriptionSimpleJson(result, "It's raining today.");
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    getAudioTranslationRunner((deployment, audioFile) -> {
        // Verbose JSON should carry segment metadata and report the TRANSLATE task label.
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        options.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
        AudioTranscription result = client.getAudioTranslation(deployment, audioFile, options);
        assertAudioTranscriptionVerboseJson(result, "It's raining today.", AudioTaskLabel.TRANSLATE);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    getAudioTranslationRunner((deployment, audioFile) -> {
        // Plain-text format returns the raw translation string (trailing newline included).
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        options.setResponseFormat(AudioTranscriptionFormat.TEXT);
        String translatedText = client.getAudioTranslationText(deployment, audioFile, options);
        assertEquals("It's raining today.\n", translatedText);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    getAudioTranslationRunner((deployment, audioFile) -> {
        // SRT output is a subtitle file: numbered cues, comma-separated millisecond
        // timestamps, and the translated text.
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        options.setResponseFormat(AudioTranscriptionFormat.SRT);
        String srtOutput = client.getAudioTranslationText(deployment, audioFile, options);
        assertTrue(srtOutput.contains("1\n"));
        assertTrue(srtOutput.contains("00:00:00,000 --> "));
        assertTrue(srtOutput.contains("It's raining today."));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    getAudioTranslationRunner((deployment, audioFile) -> {
        // VTT output starts with the WEBVTT header and uses dot-separated
        // millisecond timestamps, unlike SRT.
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        options.setResponseFormat(AudioTranscriptionFormat.VTT);
        String vttOutput = client.getAudioTranslationText(deployment, audioFile, options);
        assertTrue(vttOutput.startsWith("WEBVTT\n"));
        assertTrue(vttOutput.contains("00:00:00.000 --> "));
        assertTrue(vttOutput.contains("It's raining today."));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    client = getOpenAIClient(httpClient, serviceVersion);
    // JSON-style response formats must be rejected by the plain-text translation overload.
    List<AudioTranscriptionFormat> unsupportedFormats = Arrays.asList(
        AudioTranscriptionFormat.JSON,
        AudioTranscriptionFormat.VERBOSE_JSON
    );
    getAudioTranslationRunner((deployment, audioFile) -> {
        byte[] audioBytes = BinaryData.fromFile(openTestResourceFile(audioFile)).toBytes();
        AudioTranslationOptions options = new AudioTranslationOptions(audioBytes);
        unsupportedFormats.forEach(unsupported -> {
            options.setResponseFormat(unsupported);
            assertThrows(IllegalArgumentException.class,
                () -> client.getAudioTranslationText(deployment, audioFile, options));
        });
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} |
```suggestion List<AudioTranscriptionFormat> wrongFormats = Arrays.asList( AudioTranscriptionFormat.TEXT, AudioTranscriptionFormat.SRT, AudioTranscriptionFormat.VTT ); for (AudioTranscriptionFormat format : wrongFormats) { translationOptions.setResponseFormat(format); assertThrows(IllegalArgumentException.class, () -> client.getAudioTranslation(deploymentName, translationOptions, fileName)); } ``` | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(modelId, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(modelId, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(modelId, translationOptions, fileName);
});
});
} | }); | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(modelId, fileName, translationOptions);
});
}
});
} | class NonAzureOpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getNonAzureOpenAISyncClient(HttpClient httpClient) {
return getNonAzureOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.buildClient();
}
private OpenAIClient getNonAzureOpenAISyncClient(HttpClient httpClient, KeyCredential keyCredential) {
return getNonAzureOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.credential(keyCredential)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsBadSecretKey(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(
httpClient,
new KeyCredential("not_token_looking_string"));
getCompletionsRunner((modelId, prompt) -> {
ClientAuthenticationException exception = assertThrows(ClientAuthenticationException.class,
() -> client.getCompletionsWithResponse(modelId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(401, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getEmbeddingNonAzureRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getEmbeddingNonAzureRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsContentFilterRunnerForNonAzure((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertNull(chatCompletions.getPromptFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertNull(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsContentFilterRunnerForNonAzure((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertNull(completions.getPromptFilterResults());
assertNull(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(modelId, transcriptionOptions, fileName);
assertAudioTranscriptionSimpleJson(transcription,BATMAN_TRANSCRIPTION);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranscription(modelId, transcriptionOptions, fileName);
assertAudioTranscriptionVerboseJson(transcription, BATMAN_TRANSCRIPTION, AudioTaskLabel.TRANSCRIBE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranscriptionText(modelId, transcriptionOptions, fileName);
assertEquals(BATMAN_TRANSCRIPTION + "\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription translation = client.getAudioTranslation(modelId, translationOptions, fileName);
assertAudioTranscriptionSimpleJson(translation,"It's raining today.");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription translation = client.getAudioTranslation(modelId, translationOptions, fileName);
assertAudioTranscriptionVerboseJson(translation,"It's raining today.", AudioTaskLabel.TRANSCRIBE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(modelId, translationOptions, fileName);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranslationText(modelId, translationOptions, fileName);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranslationText(modelId, translationOptions, fileName);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(modelId, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(modelId, translationOptions, fileName);
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} | class NonAzureOpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getNonAzureOpenAISyncClient(HttpClient httpClient) {
return getNonAzureOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.buildClient();
}
private OpenAIClient getNonAzureOpenAISyncClient(HttpClient httpClient, KeyCredential keyCredential) {
return getNonAzureOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.credential(keyCredential)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsBadSecretKey(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(
httpClient,
new KeyCredential("not_token_looking_string"));
getCompletionsRunner((modelId, prompt) -> {
ClientAuthenticationException exception = assertThrows(ClientAuthenticationException.class,
() -> client.getCompletionsWithResponse(modelId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(401, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsForNonAzureRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getEmbeddingNonAzureRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getEmbeddingNonAzureRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatFunctionForNonAzureRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getChatCompletionsContentFilterRunnerForNonAzure((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertNull(chatCompletions.getPromptFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertNull(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getCompletionsContentFilterRunnerForNonAzure((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertNull(completions.getPromptFilterResults());
assertNull(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(modelId, fileName, transcriptionOptions);
assertAudioTranscriptionSimpleJson(transcription, BATMAN_TRANSCRIPTION);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranscription(modelId, fileName, transcriptionOptions);
assertAudioTranscriptionVerboseJson(transcription, BATMAN_TRANSCRIPTION, AudioTaskLabel.TRANSCRIBE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranscriptionText(modelId, fileName, transcriptionOptions);
assertEquals(BATMAN_TRANSCRIPTION + "\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranscriptionText(modelId, fileName, transcriptionOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranscriptionText(modelId, fileName, transcriptionOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
transcriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranscriptionText(modelId, fileName, transcriptionOptions);
});
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranscriptionRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
transcriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranscription(modelId, fileName, transcriptionOptions);
});
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription translation = client.getAudioTranslation(modelId, fileName, translationOptions);
assertAudioTranscriptionSimpleJson(translation, "It's raining today.");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription translation = client.getAudioTranslation(modelId, fileName, translationOptions);
assertAudioTranscriptionVerboseJson(translation, "It's raining today.", AudioTaskLabel.TRANSLATE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(modelId, fileName, translationOptions);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranslationText(modelId, fileName, translationOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranslationText(modelId, fileName, translationOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getNonAzureOpenAISyncClient(httpClient);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranslationRunnerForNonAzure((modelId, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(modelId, fileName, translationOptions);
});
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} |
Suggestion provided in https://github.com/Azure/azure-sdk-for-java/pull/36693#discussion_r1326183120 | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, translationOptions, fileName);
});
});
} | }); | public void testGetAudioTranslationJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslation(deploymentName, fileName, translationOptions);
});
}
});
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscription(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranslation(deploymentName, translationOptions, fileName);
assertNotNull(transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(deploymentName, translationOptions, fileName);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlainWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, translationOptions, fileName);
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
} | class OpenAISyncClientTest extends OpenAIClientTestBase {
private OpenAIClient client;
private OpenAIClient getOpenAIClient(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
return getOpenAIClientBuilder(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, serviceVersion)
.buildClient();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Completions resultCompletions = client.getCompletions(deploymentId, new CompletionsOptions(prompt));
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
resultCompletions.forEach(OpenAIClientTestBase::assertCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsFromPrompt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsFromSinglePromptRunner((deploymentId, prompts) -> {
Completions completions = client.getCompletions(deploymentId, prompts);
assertCompletions(1, completions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
Response<BinaryData> response = client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions());
Completions resultCompletions = assertAndGetValueFromResponse(response, Completions.class, 200);
assertCompletions(1, resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsWithResponseBadDeployment(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((_deploymentId, prompt) -> {
String deploymentId = "BAD_DEPLOYMENT_ID";
ResourceNotFoundException exception = assertThrows(ResourceNotFoundException.class,
() -> client.getCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new CompletionsOptions(prompt)), new RequestOptions()));
assertEquals(404, exception.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsUsageField(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(1024);
completionsOptions.setN(3);
completionsOptions.setLogprobs(1);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
CompletionsUsage usage = resultCompletions.getUsage();
assertCompletions(completionsOptions.getN() * completionsOptions.getPrompt().size(), resultCompletions);
assertNotNull(usage);
assertTrue(usage.getTotalTokens() > 0);
assertEquals(usage.getCompletionTokens() + usage.getPromptTokens(), usage.getTotalTokens());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetCompletionsTokenCutoff(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(prompt);
completionsOptions.setMaxTokens(3);
Completions resultCompletions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, "length", resultCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
ChatCompletions resultChatCompletions = client.getChatCompletions(deploymentId, new ChatCompletionsOptions(chatMessages));
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsStream(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentId, new ChatCompletionsOptions(chatMessages));
assertTrue(resultChatCompletions.stream().toArray().length > 1);
resultChatCompletions.forEach(OpenAIClientTestBase::assertChatCompletionsStream);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatCompletionsRunner((deploymentId, chatMessages) -> {
Response<BinaryData> response = client.getChatCompletionsWithResponse(deploymentId,
BinaryData.fromObject(new ChatCompletionsOptions(chatMessages)), new RequestOptions());
ChatCompletions resultChatCompletions = assertAndGetValueFromResponse(response, ChatCompletions.class, 200);
assertChatCompletions(1, resultChatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Embeddings resultEmbeddings = client.getEmbeddings(deploymentId, embeddingsOptions);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getEmbeddingRunner((deploymentId, embeddingsOptions) -> {
Response<BinaryData> response = client.getEmbeddingsWithResponse(deploymentId,
BinaryData.fromObject(embeddingsOptions), new RequestOptions());
Embeddings resultEmbeddings = assertAndGetValueFromResponse(response, Embeddings.class, 200);
assertEmbeddings(resultEmbeddings);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGenerateImage(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getImageGenerationRunner(options -> assertImageResponse(client.getImages(options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionAutoPreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.AUTO);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
MyFunctionCallArguments arguments = assertFunctionCall(
chatChoice,
"MyFunction",
MyFunctionCallArguments.class);
assertEquals(arguments.getLocation(), "San Francisco, CA");
assertEquals(arguments.getUnit(), "CELSIUS");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNonePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(FunctionCallConfig.NONE);
ChatCompletions chatCompletions = client.getChatCompletions(modelId, chatCompletionsOptions);
assertChatCompletions(1, "stop", ChatRole.ASSISTANT, chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatFunctionNotSuppliedByNamePreset(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getChatFunctionForRunner((modelId, chatCompletionsOptions) -> {
chatCompletionsOptions.setFunctionCall(new FunctionCallConfig("NotMyFunction"));
HttpResponseException exception = assertThrows(HttpResponseException.class,
() -> client.getChatCompletions(modelId, chatCompletionsOptions));
assertEquals(400, exception.getResponse().getStatusCode());
assertInstanceOf(HttpResponseException.class, exception);
assertTrue(exception.getMessage().contains("Invalid value for 'function_call'"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
ChatCompletions chatCompletions = client.getChatCompletions(modelId, new ChatCompletionsOptions(chatMessages));
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
assertEquals(1, chatCompletions.getChoices().size());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsContentFilterRunner((modelId, chatMessages) -> {
IterableStream<ChatCompletions> messageList = client.getChatCompletionsStream(modelId, new ChatCompletionsOptions(chatMessages));
int i = 0;
int totalMessages = messageList.stream().toArray().length;
for (Iterator<ChatCompletions> it = messageList.iterator(); it.hasNext();) {
ChatCompletions chatCompletions = it.next();
assertChatCompletionsStream(chatCompletions);
if (i == 0) {
assertEquals(1, chatCompletions.getPromptFilterResults().size());
assertSafeContentFilterResults(chatCompletions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == 1) {
assertEquals(ChatRole.ASSISTANT, chatCompletions.getChoices().get(0).getDelta().getRole());
assertNull(chatCompletions.getPromptFilterResults());
ContentFilterResults contentFilterResults = chatCompletions.getChoices().get(0).getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else if (i == totalMessages - 1) {
assertEquals(1, chatCompletions.getChoices().size());
ChatChoice chatChoice = chatCompletions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), chatChoice.getFinishReason());
assertNotNull(chatChoice.getDelta());
assertNull(chatChoice.getDelta().getContent());
ContentFilterResults contentFilterResults = chatChoice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(chatCompletions.getPromptFilterResults());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
assertSafeContentFilterResults(chatCompletions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getCompletionsContentFilterRunner((modelId, prompt) -> {
CompletionsOptions completionsOptions = new CompletionsOptions(Arrays.asList(prompt));
completionsOptions.setMaxTokens(2000);
Completions completions = client.getCompletions(modelId, completionsOptions);
assertCompletions(1, completions);
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testCompletionStreamContentFiltering(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getCompletionsRunner((deploymentId, prompt) -> {
IterableStream<Completions> resultCompletions = client.getCompletionsStream(deploymentId, new CompletionsOptions(prompt));
assertTrue(resultCompletions.stream().toArray().length > 1);
int i = 0;
int totalCompletions = resultCompletions.stream().toArray().length;
for (Iterator<Completions> it = resultCompletions.iterator(); it.hasNext();) {
Completions completions = it.next();
assertCompletionsStream(completions);
if (i == 0) {
assertEquals(1, completions.getPromptFilterResults().size());
assertSafeContentFilterResults(completions.getPromptFilterResults().get(0).getContentFilterResults());
} else if (i == totalCompletions - 1) {
assertEquals(1, completions.getChoices().size());
Choice choice = completions.getChoices().get(0);
assertEquals(CompletionsFinishReason.fromString("stop"), choice.getFinishReason());
assertNotNull(choice.getText());
ContentFilterResults contentFilterResults = choice.getContentFilterResults();
assertEmptyContentFilterResults(contentFilterResults);
} else {
assertNull(completions.getPromptFilterResults());
assertNotNull(completions.getChoices().get(0));
assertSafeContentFilterResults(completions.getChoices().get(0).getContentFilterResults());
}
i++;
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
ChatCompletions chatCompletions = client.getChatCompletions(deploymentName, chatCompletionsOptions);
assertChatCompletionsCognitiveSearch(chatCompletions);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testChatCompletionsStreamingBasicSearchExtension(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, OpenAIServiceVersion.V2023_08_01_PREVIEW);
getChatCompletionsAzureChatSearchRunner((deploymentName, chatCompletionsOptions) -> {
AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration =
new AzureCognitiveSearchChatExtensionConfiguration(
"https:
getAzureCognitiveSearchKey(),
"openai-test-index-carbon-wiki"
);
AzureChatExtensionConfiguration extensionConfiguration =
new AzureChatExtensionConfiguration(
AzureChatExtensionType.AZURE_COGNITIVE_SEARCH,
BinaryData.fromObject(cognitiveSearchConfiguration));
chatCompletionsOptions.setDataSources(Arrays.asList(extensionConfiguration));
IterableStream<ChatCompletions> resultChatCompletions = client.getChatCompletionsStream(deploymentName, chatCompletionsOptions);
assertChatCompletionsStreamingCognitiveSearch(resultChatCompletions.stream());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionSimpleJson(transcription, BATMAN_TRANSCRIPTION);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentName, fileName, transcriptionOptions);
assertAudioTranscriptionVerboseJson(transcription, BATMAN_TRANSCRIPTION, AudioTaskLabel.TRANSCRIBE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertEquals(BATMAN_TRANSCRIPTION + "\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file);
transcriptionOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranscriptionText(deploymentName, fileName, transcriptionOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("Batman"));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscriptionText(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranscriptionJsonWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.TEXT,
AudioTranscriptionFormat.SRT,
AudioTranscriptionFormat.VTT
);
getAudioTranscriptionRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranscriptionOptions audioTranscriptionOptions = new AudioTranscriptionOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
audioTranscriptionOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () ->
client.getAudioTranscription(deploymentName, fileName, audioTranscriptionOptions));
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription translation = client.getAudioTranslation(deploymentName, fileName, translationOptions);
assertAudioTranscriptionSimpleJson(translation, "It's raining today.");
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVerboseJson(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VERBOSE_JSON);
AudioTranscription translation = client.getAudioTranslation(deploymentName, fileName, translationOptions);
assertAudioTranscriptionVerboseJson(translation, "It's raining today.", AudioTaskLabel.TRANSLATE);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextPlain(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.TEXT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertEquals("It's raining today.\n", transcription);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationSrt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.SRT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertTrue(transcription.contains("1\n"));
assertTrue(transcription.contains("00:00:00,000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationVtt(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
translationOptions.setResponseFormat(AudioTranscriptionFormat.VTT);
String transcription = client.getAudioTranslationText(deploymentName, fileName, translationOptions);
assertTrue(transcription.startsWith("WEBVTT\n"));
assertTrue(transcription.contains("00:00:00.000 --> "));
assertTrue(transcription.contains("It's raining today."));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils
public void testGetAudioTranslationTextWrongFormats(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
client = getOpenAIClient(httpClient, serviceVersion);
List<AudioTranscriptionFormat> wrongFormats = Arrays.asList(
AudioTranscriptionFormat.JSON,
AudioTranscriptionFormat.VERBOSE_JSON
);
getAudioTranslationRunner((deploymentName, fileName) -> {
byte[] file = BinaryData.fromFile(openTestResourceFile(fileName)).toBytes();
AudioTranslationOptions translationOptions = new AudioTranslationOptions(file);
for (AudioTranscriptionFormat format: wrongFormats) {
translationOptions.setResponseFormat(format);
assertThrows(IllegalArgumentException.class, () -> {
client.getAudioTranslationText(deploymentName, fileName, translationOptions);
});
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.TestUtils#getTestParameters")
} |
I was also thinking to make this API public so that SDK's like resource manager can add sanitizers in bulk. | public static HttpRequest createAddSanitizersRequest(List<TestProxySanitizer> sanitizers, URL proxyUrl) {
List<String> sanitizersJsonPayloads = new ArrayList<>(sanitizers.size());
for (TestProxySanitizer sanitizer : sanitizers) {
String requestBody;
String sanitizerType;
switch (sanitizer.getType()) {
case URL:
sanitizerType = TestProxySanitizerType.URL.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_REGEX:
sanitizerType = TestProxySanitizerType.BODY_REGEX.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_KEY:
sanitizerType = TestProxySanitizerType.BODY_KEY.getName();
requestBody = createBodyJsonKeyRequestBody(sanitizer.getKey(), sanitizer.getRegex(),
sanitizer.getRedactedValue());
break;
case HEADER:
sanitizerType = HEADER.getName();
if (sanitizer.getKey() == null && sanitizer.getRegex() == null) {
throw new RuntimeException(
"Missing regexKey and/or headerKey for sanitizer type {" + sanitizerType + "}");
}
requestBody = createRegexRequestBody(sanitizer.getKey(), sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
default:
throw new RuntimeException("Sanitizer type {" + sanitizer.getType() + "} not supported");
}
sanitizersJsonPayloads.add("{\"Name\":\"" + sanitizerType + "\",\"Body\":" + requestBody + "}");
}
String requestBody = "[" + CoreUtils.stringJoin(",", sanitizersJsonPayloads) + "]";
return new HttpRequest(HttpMethod.POST, proxyUrl +"/Admin/AddSanitizers").setBody(requestBody);
} | return new HttpRequest(HttpMethod.POST, proxyUrl +"/Admin/AddSanitizers").setBody(requestBody); | public static HttpRequest createAddSanitizersRequest(List<TestProxySanitizer> sanitizers, URL proxyUrl) {
List<String> sanitizersJsonPayloads = new ArrayList<>(sanitizers.size());
for (TestProxySanitizer sanitizer : sanitizers) {
String requestBody;
String sanitizerType;
switch (sanitizer.getType()) {
case URL:
sanitizerType = TestProxySanitizerType.URL.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_REGEX:
sanitizerType = TestProxySanitizerType.BODY_REGEX.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_KEY:
sanitizerType = TestProxySanitizerType.BODY_KEY.getName();
requestBody = createBodyJsonKeyRequestBody(sanitizer.getKey(), sanitizer.getRegex(),
sanitizer.getRedactedValue());
break;
case HEADER:
sanitizerType = HEADER.getName();
if (sanitizer.getKey() == null && sanitizer.getRegex() == null) {
throw new RuntimeException(
"Missing regexKey and/or headerKey for sanitizer type {" + sanitizerType + "}");
}
requestBody = createRegexRequestBody(sanitizer.getKey(), sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
default:
throw new RuntimeException("Sanitizer type {" + sanitizer.getType() + "} not supported");
}
sanitizersJsonPayloads.add("{\"Name\":\"" + sanitizerType + "\",\"Body\":" + requestBody + "}");
}
String requestBody = "[" + CoreUtils.stringJoin(",", sanitizersJsonPayloads) + "]";
return new HttpRequest(HttpMethod.POST, proxyUrl + "/Admin/AddSanitizers").setBody(requestBody);
} | class path
* @return The version string to use.
* @throws RuntimeException The eng folder could not be located in the repo.
* @throws UncheckedIOException The version file could not be read properly.
*/
public static String getTestProxyVersion(Path testClassPath) {
Path rootPath = TestUtils.getRepoRootResolveUntil(testClassPath, "eng");
Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt");
rootPath = rootPath.resolve(versionFile);
try {
return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), "");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | class path
* @return The version string to use.
* @throws RuntimeException The eng folder could not be located in the repo.
* @throws UncheckedIOException The version file could not be read properly.
*/
public static String getTestProxyVersion(Path testClassPath) {
Path rootPath = TestUtils.getRepoRootResolveUntil(testClassPath, "eng");
Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt");
rootPath = rootPath.resolve(versionFile);
try {
return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), "");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} |
This API is public 😄 | public static HttpRequest createAddSanitizersRequest(List<TestProxySanitizer> sanitizers, URL proxyUrl) {
List<String> sanitizersJsonPayloads = new ArrayList<>(sanitizers.size());
for (TestProxySanitizer sanitizer : sanitizers) {
String requestBody;
String sanitizerType;
switch (sanitizer.getType()) {
case URL:
sanitizerType = TestProxySanitizerType.URL.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_REGEX:
sanitizerType = TestProxySanitizerType.BODY_REGEX.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_KEY:
sanitizerType = TestProxySanitizerType.BODY_KEY.getName();
requestBody = createBodyJsonKeyRequestBody(sanitizer.getKey(), sanitizer.getRegex(),
sanitizer.getRedactedValue());
break;
case HEADER:
sanitizerType = HEADER.getName();
if (sanitizer.getKey() == null && sanitizer.getRegex() == null) {
throw new RuntimeException(
"Missing regexKey and/or headerKey for sanitizer type {" + sanitizerType + "}");
}
requestBody = createRegexRequestBody(sanitizer.getKey(), sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
default:
throw new RuntimeException("Sanitizer type {" + sanitizer.getType() + "} not supported");
}
sanitizersJsonPayloads.add("{\"Name\":\"" + sanitizerType + "\",\"Body\":" + requestBody + "}");
}
String requestBody = "[" + CoreUtils.stringJoin(",", sanitizersJsonPayloads) + "]";
return new HttpRequest(HttpMethod.POST, proxyUrl +"/Admin/AddSanitizers").setBody(requestBody);
} | return new HttpRequest(HttpMethod.POST, proxyUrl +"/Admin/AddSanitizers").setBody(requestBody); | public static HttpRequest createAddSanitizersRequest(List<TestProxySanitizer> sanitizers, URL proxyUrl) {
List<String> sanitizersJsonPayloads = new ArrayList<>(sanitizers.size());
for (TestProxySanitizer sanitizer : sanitizers) {
String requestBody;
String sanitizerType;
switch (sanitizer.getType()) {
case URL:
sanitizerType = TestProxySanitizerType.URL.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_REGEX:
sanitizerType = TestProxySanitizerType.BODY_REGEX.getName();
requestBody = createRegexRequestBody(null, sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
case BODY_KEY:
sanitizerType = TestProxySanitizerType.BODY_KEY.getName();
requestBody = createBodyJsonKeyRequestBody(sanitizer.getKey(), sanitizer.getRegex(),
sanitizer.getRedactedValue());
break;
case HEADER:
sanitizerType = HEADER.getName();
if (sanitizer.getKey() == null && sanitizer.getRegex() == null) {
throw new RuntimeException(
"Missing regexKey and/or headerKey for sanitizer type {" + sanitizerType + "}");
}
requestBody = createRegexRequestBody(sanitizer.getKey(), sanitizer.getRegex(), sanitizer.getRedactedValue(),
sanitizer.getGroupForReplace());
break;
default:
throw new RuntimeException("Sanitizer type {" + sanitizer.getType() + "} not supported");
}
sanitizersJsonPayloads.add("{\"Name\":\"" + sanitizerType + "\",\"Body\":" + requestBody + "}");
}
String requestBody = "[" + CoreUtils.stringJoin(",", sanitizersJsonPayloads) + "]";
return new HttpRequest(HttpMethod.POST, proxyUrl + "/Admin/AddSanitizers").setBody(requestBody);
} | class path
* @return The version string to use.
* @throws RuntimeException The eng folder could not be located in the repo.
* @throws UncheckedIOException The version file could not be read properly.
*/
public static String getTestProxyVersion(Path testClassPath) {
Path rootPath = TestUtils.getRepoRootResolveUntil(testClassPath, "eng");
Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt");
rootPath = rootPath.resolve(versionFile);
try {
return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), "");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | class path
* @return The version string to use.
* @throws RuntimeException The eng folder could not be located in the repo.
* @throws UncheckedIOException The version file could not be read properly.
*/
public static String getTestProxyVersion(Path testClassPath) {
Path rootPath = TestUtils.getRepoRootResolveUntil(testClassPath, "eng");
Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt");
rootPath = rootPath.resolve(versionFile);
try {
return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), "");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} |
```suggestion fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName()); ``` | public void createIfNotExistsPermissionsAndUmask() {
fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"),Context.NONE), 201);
} | fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName()); | public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
    // Each test starts from a freshly created file with a unique generated name.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
    // Best-effort deletion of any local files a test produced; failures are ignored.
    createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    // BUG FIX: without nullValues = "null" the literal string "null" was bound to
    // every parameter in the first row, so the contentType == null branch below
    // was unreachable. Matches the convention already used by createMetadata.
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // The service reports a default content type when none is supplied.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();

    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();

    // The encryption context must round-trip through getProperties, read and listPaths.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();

    // BUG FIX: this StepVerifier had no terminal verify* call, so it was never
    // subscribed and its assertion never executed.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();

    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: createWithResponse returns a cold Mono; without block() the
    // request was never sent and the test only observed the file from setup().
    fc.createWithResponse(null, null).block();

    // Owner and group default to the superuser when not specified.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // A create-if-not-exists on a fresh path succeeds with 201 and returns the
    // standard creation headers (ETag, last-modified, request id, ...).
    StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();

    // Verify via StepVerifier for consistency with the other async tests
    // (e.g. createIfNotExistsMin) instead of blocking inside the assertion.
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null, null, null, null, null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    // BUG FIX: without nullValues = "null" the literal string "null" was bound to
    // every parameter in the first row, so the contentType == null branch below
    // was unreachable. Also normalizes spacing to match the rest of the file.
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // The service reports a default content type when none is supplied.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();

    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    // Build the expected metadata map, skipping absent (null) keys.
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();

    // The metadata set at creation must be returned verbatim by getProperties.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    // BUG FIX: a duplicate @Test annotation preceded @DisabledIf; JUnit 5 rejects
    // repeated @Test annotations on the same method.
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();

    // The encryption context must round-trip through getProperties, read and listPaths.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();

    // BUG FIX: this StepVerifier had no terminal verify* call, so it was never
    // subscribed and its assertion never executed.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();

    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Consistency: reuse the shared PATH_ACCESS_CONTROL_ENTRIES constant instead
    // of re-parsing the identical ACL string locally.
    DataLakePathCreateOptions options =
        new DataLakePathCreateOptions().setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES);
    fc.createIfNotExistsWithResponse(options, null).block();

    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(PATH_ACCESS_CONTROL_ENTRIES.get(0), r.getAccessControlList().get(0));
            assertEquals(PATH_ACCESS_CONTROL_ENTRIES.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value={"null,null,null,null,null,application/octet-stream","control,disposition,encoding,language,null,type"},
nullValues="null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,String contentEncoding,
String contentLanguage, byte[] contentMD5,String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value={"null,null,null,null","foo,bar,fizz,buzz"},nullValues="null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for(String k : metadata.keySet()){
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    // A proposed lease id is only valid when paired with a lease duration.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    // Proposing a lease id without a lease duration is rejected by the service.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
// Creating with a 15s lease should leave the path locked/leased with a FIXED duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Each supplied schedule-deletion option (absolute expiry or none) is accepted at creation.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Relative expiry of 6 days should produce an expiresOn of creationTime + 6 days
// (compared with precision tolerance for clock skew).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Minimal delete: a bare deleteWithResponse on the fixture file returns 200.
@Test
public void deleteMin() {
    StepVerifier.create(fc.deleteWithResponse(null, null, null))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// After a successful delete, fetching properties must 404 with BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null,null,null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when every supplied access condition (lease, ETag match, dates) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match,String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any supplied access condition is deliberately unsatisfied
// (note: noneMatch is set to the path's REAL ETag to force the failure).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file emits true.
@Test
public void deleteIfExists() {
    StepVerifier.create(fc.deleteIfExists())
        .assertNext(deleted -> assertTrue(deleted))
        .verifyComplete();
}
// Minimal deleteIfExists on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists succeeds, fetching properties must error (file gone).
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on an already-deleted file reports 404 (no exception thrown).
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns a result with a non-null ETag and last-modified timestamp.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse returns 200 on success.
@Test
public void setPermissionsWithResponse() {
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// setPermissions succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a path that was never created must error.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns a result with a non-null ETag and last-modified timestamp.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r ->{
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse returns 200 on success.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a path that was never created must error.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Gate for @DisabledIf: true when the targeted service version predates 2020-02-10
// (recursive ACL APIs are unavailable before that version).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file: one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal of a fixed entry list on a single file: one file changed, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl populates ACL, permissions, owner and group.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// getAccessControlWithResponse (no UPN resolution) returns 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControlWithResponse with userPrincipalNameReturned = true returns 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails on unsatisfied access conditions. The garbage-lease-id case is
// skipped: that scenario is not applicable to this read operation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustive check of the default property set for a freshly created file: standard
// headers present, no copy/lease/encryption-scope state, HOT tier, empty metadata,
// and the path is reported as a file (not a directory).
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// Minimal getProperties call returns 200.
@Test
public void getPropertiesMin() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// getProperties succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails on unsatisfied access conditions.
// NOTE(review): unlike the sibling *ACFail tests, this one passes the result of
// setupPathLeaseCondition into setLeaseId instead of the raw leaseID — confirm intentional.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// getProperties on a never-created path errors with a BlobNotFound message.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// Passing null headers is a valid no-op set: 200 with standard response headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Changing only Content-Type while echoing back the existing header values should
// leave everything else intact; verified by re-reading the properties.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Sets each HTTP header field on the path and verifies the values round-trip through
// getPropertiesWithResponse. The default payload is uploaded first so a Content-MD5
// header can legitimately apply to the content.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUG FIX: append/flush returned Monos that were never subscribed, so the upload
    // never executed; block() (as every sibling test does) makes the setup actually run.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);

    fc.setHttpHeaders(putHeaders).block();

    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Argument source for setHTTPHeadersHeaders: the all-null case and a fully populated
// case whose Content-MD5 is the Base64 MD5 of the default payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
// setHttpHeaders succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a never-created path must error.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// A single metadata pair set via setMetadata is returned verbatim by getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata with empty or populated maps returns the expected status code and the
// exact map is read back from the path's properties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setMetadata on a never-created path must error.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// Read with all-default arguments: content matches the uploaded payload and the response
// carries the expected header set (content headers present, copy/lease extras absent).
// NOTE(review): the inner r.getValue().subscribe(...) assertion is fire-and-forget — a
// failure inside it is not awaited by StepVerifier, so it may not fail the test; consider
// collecting the Flux and asserting synchronously.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
// Reading a zero-length file yields a single empty buffer.
// NOTE(review): the fixed name "emptyFile" (rather than generatePathName()) could collide
// across concurrent runs against the same file system — confirm this is intentional.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Uses a mock policy that breaks the response mid-stream for range "bytes=2-6"; the
// retried download must surface an IOException through the value Flux rather than
// silently returning corrupt data. The commented-out variant is an alternative
// formulation kept for reference.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
/*StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.verifyErrorSatisfies(r -> {
RuntimeException e = assertInstanceOf(RuntimeException.class, r);
assertInstanceOf(IOException.class, e.getCause());
});*/
}
// Round-trip: append + flush the default payload, then read() returns those exact bytes.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0)
        .then(fc.flush(DATA.getDefaultDataSizeLong(), true))
        .block();
    StepVerifier.create(fc.read())
        .assertNext(buffer -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), buffer.array()))
        .verifyComplete();
}
// Ranged read: the bytes received for each (offset, count) pair must equal the
// corresponding substring of the default payload. A null count means "to end of file".
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Argument source for readRange: full payload, a prefix, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// read succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails when any access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requests the first three bytes with rangeGetContentMd5 = true and verifies the returned
// Content-MD5 header equals the Base64-encoded MD5 of exactly that range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            // FIX: use an explicit charset; bare getBytes() depends on the platform default
            // (pre-Java 18) and the header value is ASCII-safe Base64 text.
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5)
                .getBytes(StandardCharsets.UTF_8);
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5")
                            .digest(DATA.getDefaultText().substring(0, 3).getBytes(StandardCharsets.UTF_8))),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// With a policy that fails the first five responses, the default retry behavior must
// still deliver the complete payload.
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(failureFileAsyncClient.read())
.assertNext(r -> {
try {
downloadData.write(r.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
// readToFile without overwrite must fail with FileAlreadyExistsException (wrapped in an
// UncheckedIOException) when the destination file already exists on disk.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }

    // BUG FIX: append/flush returned Monos that were never subscribed, so no content was
    // ever uploaded; block() (as the sibling download tests do) makes the setup run.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite = true succeeds over an existing destination file and the
// downloaded bytes equal the uploaded payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile creates the destination when it does not exist and writes the full payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Custom OpenOptions (CREATE + READ + WRITE, no TRUNCATE) on readToFileWithResponse
// still produce the full payload in the destination file.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// OpenOptions including TRUNCATE_EXISTING overwrite an existing destination file and
// the downloaded content equals the uploaded payload.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file,outFile,0,fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// Parameter sizes for the download tests: a tiny payload, a 16 MB chunk-aligned payload,
// an unaligned multi-chunk payload, and a 50 MB payload.
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(20, 16 * Constants.MB, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads only the requested byte range of the file and verifies the slice matches the source.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges exercised: the full payload, everything after the first byte, a small interior slice,
// everything but the last byte, and a count far past the end of the data.
private static Stream<FileRange> downloadFileRangeSupplier() {
long defaultSize = DATA.getDefaultDataSizeLong();
return Stream.of(
new FileRange(0, defaultSize),
new FileRange(1, defaultSize - 1),
new FileRange(3, 2L),
new FileRange(0, defaultSize - 1),
new FileRange(0, 10 * 1024L));
}
// A range whose offset starts past the end of the file should fail with a storage error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with an offset but no count should download from the offset to the end of the file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// Download should succeed when every supplied access condition (modified-since, etag match,
// lease) is satisfied; setup helpers resolve the real etag/lease values for the conditions.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// Download should fail when an access condition is deliberately unsatisfied; the service may
// report either ConditionNotMet or LeaseIdMismatchWithBlobOperation depending on the condition.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every cumulative byte count it is
// given so tests can assert totals and monotonicity. The list reference never changes, so it
// is declared final (the original left it mutable).
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
final List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every cumulative progress value it is given.
// The list reference never changes, so it is declared final (the original left it mutable).
private static final class MockProgressListener implements ProgressListener {
final List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// A rename with all optional parameters defaulted should succeed with 201 Created.
@Test
public void renameMin() {
Mono<Response<DataLakeFileAsyncClient>> renameResponse =
fc.renameWithResponse(null, generatePathName(), null, null, null);
assertAsyncResponseStatusCode(renameResponse, 201);
}
// Renames the file, then verifies the destination client resolves (200) and the original path
// no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
// JUnit's assertEquals takes (expected, actual); the original had them reversed.
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
})
.verifyComplete();
// The source path must be gone after the rename.
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
// Renames the file into a different, freshly created file system and verifies the destination
// resolves while the source is gone.
@Test
public void renameFilesystemWithResponse() {
// block() may return null; fail fast with a clear message instead of an NPE on the next line.
DataLakeFileSystemAsyncClient newFileSystem = Objects.requireNonNull(
primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block(),
"Expected file system to be created.");
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
// JUnit's assertEquals takes (expected, actual); the original had them reversed.
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
// Renaming a path that was never created should surface a storage error.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename must handle percent-encoded characters in the source and/or destination path names.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
})
.verifyComplete();
}
// Rename should succeed when the access conditions applied to the SOURCE path are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename should fail when the SOURCE access conditions are deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename should succeed when the access conditions applied to the DESTINATION path are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename should fail when the DESTINATION access conditions are deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename should work when the client is authenticated with a file-system SAS token that grants
// the move permission; the destination must then resolve successfully.
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
// block() may return null; fail fast instead of an NPE on the verification below.
DataLakeFileAsyncClient destClient = Objects.requireNonNull(
client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block(),
"Expected rename to return a destination client.");
// JUnit's assertEquals takes (expected, actual); the original had them reversed.
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?', which the client must strip.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
// block() may return null; fail fast instead of an NPE on the verification below.
DataLakeFileAsyncClient destClient = Objects.requireNonNull(
client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block(),
"Expected rename to return a destination client.");
// JUnit's assertEquals takes (expected, actual); the original had them reversed.
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// A bare append of the default payload at offset 0 should complete without throwing.
@Test
public void appendDataMin() {
Mono<Void> append = fc.append(DATA.getDefaultBinaryData(), 0);
assertDoesNotThrow(() -> append.block());
}
// Append should return 202 Accepted with the standard service headers and report the request
// as server-side encrypted.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a client-computed MD5 should be accepted (the service validates the digest).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
// getBytes() without a charset uses the platform default; pin UTF-8 so the digest matches
// the uploaded payload bytes on every platform.
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes(StandardCharsets.UTF_8));
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null data or a declared length that disagrees with the actual stream length must fail
// client-side with the expected exception type.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null body, declared size one byte too large, declared size one byte too small.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// Appending a null body (with declared length 0) is rejected by the service.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(DataLakeStorageException.class);
}
// Append with the correct active lease id should succeed with 202.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a wrong lease id against a leased path must fail with 412 Precondition Failed.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf condition: true when the targeted service version predates 2020-08-04
// (the version that introduced lease actions on append/flush).
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// Append with LeaseAction.ACQUIRE should leave the path locked under a fixed-duration lease.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Append with LeaseAction.AUTO_RENEW against an existing lease should keep the path leased.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Append with LeaseAction.RELEASE (+flush) should leave the path unlocked and available.
// NOTE(review): lease actions require service version 2020-08-04+; every sibling lease-action
// test is gated with @DisabledIf — added here for consistency. Confirm intended behavior.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Append with LeaseAction.ACQUIRE_RELEASE (+flush) should acquire and then immediately release
// the lease, leaving the path unlocked.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created should fail with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// A pipeline policy injects a transient failure; the client must retry transparently and the
// flushed data must still read back intact.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Append with setFlush(true) should both accept the data (202) and commit it, so a subsequent
// read returns the payload without a separate flush call.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// A bare BinaryData append at offset 0 should complete without throwing.
@Test
public void appendBinaryDataMin() {
Mono<Void> append = fc.append(DATA.getDefaultBinaryData(), 0);
assertDoesNotThrow(() -> append.block());
}
// BinaryData append should return 202 with the standard service headers.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// BinaryData append with setFlush(true) should accept the data (202) and commit it; the read-back
// verification mirrors appendDataFlush, which the original omitted here.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
// setFlush(true) should have committed the data, so a read must return the payload.
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// After appending the default payload, a flush with overwrite enabled should not throw.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
Mono<PathInfo> flush = fc.flush(DATA.getDefaultDataSizeLong(), true);
assertDoesNotThrow(() -> flush.block());
}
// Flush with close=true (and retainUncommittedData=false) should succeed on a fresh file.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Flush with retainUncommittedData=true (and close=false) should succeed on a fresh file.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing at a position (4) that does not match the appended data length must fail.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time should be persisted on the path; a null content type
// defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
// Effectively-final copy required for use inside the lambda below.
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush should succeed (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush should fail when an access condition is deliberately unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
// Use the long-valued size accessor like every sibling flush test (the original mixed in the
// int-valued getDefaultDataSize()).
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created should surface a storage error.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false over already-committed data must be rejected.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// The client should decode percent-encoded path names so getFilePath() returns the logical name.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Building a client with token credentials over plain HTTP must be rejected (bearer tokens
// require TLS).
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// (fileSize, blockSize) pairs: default block size for small/medium/large files, and an explicit
// 4 MiB block size for the 101 MB upload.
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
// Metadata supplied to uploadFromFile should be persisted on the path alongside the content.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(fc.read())
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r.array());
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// With overwrite defaulted (false), uploading over an existing remote file must fail for both
// the shared client and a freshly created file.
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
// Register the second temp file for cleanup too; the original created it inline and leaked it.
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
// With overwrite=true, uploadFromFile must succeed against an existing path for both
// the shared client (fc) and a freshly created file (fac).
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Latest cumulative byte count reported by the upload-from-file pipeline.
    private long totalBytesSeen;

    @Override
    public void reportProgress(long bytesTransferred) {
        totalBytesSeen = bytesTransferred;
    }

    long getReportedByteCount() {
        return totalBytesSeen;
    }
}
private static final class FileUploadListener implements ProgressListener {
    // Latest cumulative byte count reported by the upload-from-file pipeline.
    private long totalBytesSeen;

    @Override
    public void handleProgress(long bytesTransferred) {
        totalBytesSeen = bytesTransferred;
    }

    long getReportedByteCount() {
        return totalBytesSeen;
    }
}
// Verifies the deprecated ProgressReceiver reports the full byte count when uploading
// from a local file with various block sizes and concurrency levels.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// blockSize - 1 for the single-shot threshold forces the chunked (staged) upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Test parameters: (total upload size, block size, max concurrency).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8));
    cases.add(Arguments.of(20 * Constants.MB, (long) Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
    return cases.build();
}
// Verifies the ProgressListener reports the full byte count when uploading from a
// local file with various block sizes and concurrency levels.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// blockSize - 1 for the single-shot threshold forces the chunked (staged) upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Uploads a local file with explicit single-shot/block-size options and verifies the
// resulting remote file size matches the source size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Test parameters: (data size, single-shot upload threshold, block size or null).
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(100, 50L, null));
    cases.add(Arguments.of(100, 50L, 20L));
    return cases.build();
}
// Uploads a local file via uploadFromFileWithResponse and validates the HTTP status,
// ETag/last-modified metadata, and the resulting remote file size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
// Bug fix: this StepVerifier previously lacked verifyComplete(), so it was never
// subscribed and the file-size assertion never actually executed.
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// A buffered upload of a single empty buffer (no overwrite flag) is expected to fail
// with a storage error.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
// Buffered upload must tolerate empty buffers interleaved in the source flux; the
// downloaded bytes must equal the concatenation of the non-empty buffers.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Cases place an empty buffer in each slot of a three-buffer upload; the expected
// download is the concatenation of the remaining content.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
        "Hello world!".getBytes(StandardCharsets.UTF_8)));
    cases.add(Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
        "Hello ".getBytes(StandardCharsets.UTF_8)));
    cases.add(Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
        "Helloworld!".getBytes(StandardCharsets.UTF_8)));
    cases.add(Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
        " world!".getBytes(StandardCharsets.UTF_8)));
    return cases.build();
}
// Buffered-upload round trip at various data/buffer sizes and concurrency levels;
// the read-back comparison is skipped above 100 MB to keep runtime bounded.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// Test parameters: (data size, buffer/block size, number of concurrent buffers).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5));
    cases.add(Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2));
    cases.add(Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, (long) Constants.MB, 10));
    cases.add(Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4));
    cases.add(Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));
    return cases.build();
}
// Asserts that `result` is exactly the concatenation of `buffers`: each chunk is
// compared against a sliding window of `result`, and no bytes may remain afterwards.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (int i = 0; i < buffers.size(); i++) {
        ByteBuffer chunk = buffers.get(i);
        chunk.position(0);
        // Window the aggregate buffer to exactly this chunk's length.
        int windowEnd = result.position() + chunk.remaining();
        result.limit(windowEnd);
        TestUtils.assertByteBuffersEqual(chunk, result);
        result.position(windowEnd);
    }
    assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    // Expected granularity of each progress report.
    private final long expectedBlockSize;
    // Number of reports received; read directly by the tests in this class.
    private long reportingCount;

    Reporter(long blockSize) {
        this.expectedBlockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % expectedBlockSize == 0;
        reportingCount++;
    }
}
private static final class Listener implements ProgressListener {
    // Expected granularity of each progress report.
    private final long expectedBlockSize;
    // Number of reports received; read directly by the tests in this class.
    private long reportingCount;

    Listener(long blockSize) {
        this.expectedBlockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % expectedBlockSize == 0;
        reportingCount++;
    }
}
// Verifies the deprecated ProgressReceiver is invoked at least once per block during a
// buffered (flux-sourced) upload.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one report per full block; retries may add more.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Test parameters: (total upload size, block size, max concurrency).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8));
    cases.add(Arguments.of(20 * Constants.MB, (long) Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20));
    return cases.build();
}
// Verifies the ProgressListener is invoked at least once per block during a buffered
// (flux-sourced) upload.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one report per full block; retries may add more.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a flux of variably-sized chunks (sizes given in MB) and verifies the
// downloaded bytes equal the concatenation of the source chunks.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Test parameters: (chunk sizes in MB, buffer size in MB, number of buffers).
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(Arrays.asList(7, 7), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(10, 10), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
    return cases.build();
}
// Exercises the single-shot vs. chunked decision path: chunk-size combinations straddle
// the 4 MB single-upload threshold and the round-tripped bytes must match.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing, but with a hot (publish().autoConnect()) source
// flux, which cannot be re-subscribed on retry.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists chosen to land below, at, and just above the 4 MB single-shot threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    Stream.Builder<List<Integer>> cases = Stream.builder();
    cases.add(Arrays.asList(10, 100, 1000, 10000));
    cases.add(Arrays.asList(4 * Constants.MB + 1, 10));
    cases.add(Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    cases.add(Collections.singletonList(4 * Constants.MB));
    return cases.build();
}
// Hot-flux upload through a pipeline that injects transient failures: retries must
// still produce a byte-exact round trip.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Read back through a clean client so the download itself is not failure-injected.
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists straddling the 4 MB single-shot threshold (no single-chunk case here).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    Stream.Builder<List<Integer>> cases = Stream.builder();
    cases.add(Arrays.asList(10, 100, 1000, 10000));
    cases.add(Arrays.asList(4 * Constants.MB + 1, 10));
    cases.add(Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.build();
}
// InputStream-sourced upload through a failure-injecting pipeline; data sizes sit just
// above the single-shot and block thresholds (2 MB) to exercise both paths.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
// Collect the downloaded chunks into one stream for comparison.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
// NOTE(review): block() can in principle return null here — would NPE below; confirm intent.
byte[] readArray = readData.toByteArray();
TestUtils.assertArraysEqual(data, readArray);
}
// Passing a null source flux to upload must surface a NullPointerException through the
// reactive chain rather than throwing eagerly.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Verifies HTTP content headers (cache-control, disposition, encoding, language, MD5,
// content-type) set at upload time are returned by getProperties.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
// MD5 is only attached for the single-shot path cases that request validation.
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Parameters: (data size, cache-control, disposition, encoding, language, validate MD5, content type).
// Small sizes take the single-shot path; 6 MB forces the chunked path.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null));
    cases.add(Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"));
    cases.add(Arguments.of(6 * Constants.MB, null, null, null, null, false, null));
    cases.add(Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"));
    return cases.build();
}
// Verifies metadata supplied to a buffered upload is persisted, including the
// empty-metadata case (all-null parameters).
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Counts appendWithResponse calls via an overriding subclass to verify that the
// single-shot/block-size options produce the expected number of append operations.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Hand-rolled spy: intercepts appends, counts them, then delegates to the real client.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Uploading with POSIX permissions ("0777") and umask ("0057") must succeed and
// produce a file of the expected size.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Buffered upload must succeed when all access conditions (lease, ETag match,
// modified-since windows) are satisfied.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Buffered upload must fail with HTTP 412 (precondition failed) when an access
// condition is deliberately unsatisfiable.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// A second buffered upload without the overwrite flag must fail once the path exists.
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// With overwrite=true, re-uploading over an existing path must succeed for both the
// shared client (fc) and a fresh client (fac).
// NOTE(review): the @EnabledIf condition string below appears truncated in this file — verify.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
// Bug fix: the returned Mono must be subscribed (block()) or the upload never executes,
// making assertDoesNotThrow vacuous. Matches the pattern used in uploadFromFileOverwrite.
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
// Uploads from a non-replayable (file-channel-backed) flux, downloads to a second
// local file, and compares the two files byte-for-byte.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Uploading from an InputStream without specifying a length must succeed and
// round-trip the default test data.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Any declared length that disagrees with the stream's actual length (zero, negative,
// off-by-one either way) must fail the upload.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid lengths: zero, negative, one under and one over the true data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    Stream.Builder<Long> lengths = Stream.builder();
    lengths.add(0L);
    lengths.add(-100L);
    lengths.add(DATA.getDefaultDataSizeLong() - 1);
    lengths.add(DATA.getDefaultDataSizeLong() + 1);
    return lengths.build();
}
// Upload through a failure-injecting pipeline must retry transparently and still
// round-trip the default test data.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Uploading from BinaryData via FileParallelUploadOptions must round-trip the default
// test data.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Uploading BinaryData with overwrite=true over an existing file must succeed and
// round-trip the default test data.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Verifies the encryption context supplied at upload is persisted and returned by
// getProperties. Requires service version 2021-04-10 or newer.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds an optional header row plus numCopies copies of a fixed two-record CSV body
// (using the serialization's separators) and uploads it via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = String.join(columnSeparator, "rn1", "rn2", "rn3", "rn4") + s.getRecordSeparator();
    String csv = String.join(columnSeparator, "100", "200", "300", "400") + s.getRecordSeparator()
        + String.join(columnSeparator, "300", "400", "500", "600") + s.getRecordSeparator();
    byte[] headerBytes = header.getBytes();
    byte[] bodyBytes = csv.getBytes();
    // The header occupies the start of the payload only when headers are declared present.
    int bodyOffset = s.isHeadersPresent() ? headerBytes.length : 0;
    byte[] payload = new byte[bodyOffset + bodyBytes.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headerBytes, 0, payload, 0, headerBytes.length);
    }
    for (int copy = 0; copy < numCopies; copy++) {
        System.arraycopy(bodyBytes, 0, payload, bodyOffset + copy * bodyBytes.length, bodyBytes.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(payload), 0).block();
    fc.flush(payload.length, true).block();
}
// Builds a small JSON object with numCopies "nameN": "ownerN" entries and uploads it
// via create/append/flush.
private void uploadSmallJson(int numCopies) {
StringBuilder b = new StringBuilder();
b.append("{\n");
for (int i = 0; i < numCopies; i++) {
b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
}
b.append('}');
// Bug fix: these reactive calls were previously not subscribed (no block()), so the
// file was never actually created or written. Mirrors the pattern in uploadCsv.
fc.create(true).block();
fc.append(BinaryData.fromString(b.toString()), 0).block();
fc.flush(b.length(), true).block();
}
// Quick Query smoke test: SELECT * over an uploaded CSV must return exactly the raw
// file bytes. Requires service version 2019-12-12 or newer.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
// Baseline: collect the raw file contents with a plain read.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
// NOTE(review): block() can in principle return null here — would NPE below; confirm intent.
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
// Query result must be identical to the raw read for SELECT *.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Quick Query with matching input/output delimited serializations and various
// record/column separators; when headers are stripped on output, the result must be
// the raw bytes minus the 16-byte header row.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
// Baseline: collect the raw file contents with a plain read.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
// NOTE(review): the flatMap lambdas below return null, which is not a valid
// Publisher/element for flatMap — this relies on blockLast() draining the inner
// flux before the null is observed; confirm this is intentional and not latent.
fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> {
piece.getValue().flatMap(r -> {
try {
queryData.write(r.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return null;
}).blockLast();
return null;
}).block();
byte[] queryArray = queryData.toByteArray();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Arguments: record separator, column separator, headers present in input, headers present in output.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    List<Arguments> cases = new ArrayList<>();
    // Vary header presence with the default separators.
    cases.add(Arguments.of('\n', ',', false, false));
    cases.add(Arguments.of('\n', ',', true, true));
    cases.add(Arguments.of('\n', ',', true, false));
    // Vary the record separator.
    cases.add(Arguments.of('\t', ',', false, false));
    cases.add(Arguments.of('\r', ',', false, false));
    cases.add(Arguments.of('<', ',', false, false));
    cases.add(Arguments.of('>', ',', false, false));
    cases.add(Arguments.of('&', ',', false, false));
    cases.add(Arguments.of('\\', ',', false, false));
    // Vary the column separator.
    cases.add(Arguments.of(',', '.', false, false));
    cases.add(Arguments.of(',', ';', false, false));
    cases.add(Arguments.of('\n', '\t', false, false));
    cases.add(Arguments.of('\n', '<', false, false));
    cases.add(Arguments.of('\n', '>', false, false));
    cases.add(Arguments.of('\n', '&', false, false));
    cases.add(Arguments.of('\n', '\\', false, false));
    return cases.stream();
}
/**
 * With escape and field-quote characters configured on both input and output serializations, the
 * queried bytes must round-trip identically to a plain read of the file.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // A Reactor flatMap mapper must not return null; signal completion instead.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
/**
 * Queries a JSON file with "SELECT *"; the output must equal the raw file contents plus the
 * trailing record separator the query appends to the final record.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // Append '\n' (byte 10) so the raw read matches the separator-terminated query output.
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // A Reactor flatMap mapper must not return null; signal completion instead.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Record counts to exercise, each paired with '\n' as the JSON record separator.
private static Stream<Arguments> queryInputJsonSupplier() {
    List<Arguments> cases = new ArrayList<>();
    for (int copies : new int[] {0, 10, 100, 1000}) {
        cases.add(Arguments.of(copies, '\n'));
    }
    return cases.stream();
}
/**
 * Queries CSV input but requests JSON output; the first record must be the expected JSON object
 * with positional column names (_1.._4).
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // A Reactor flatMap mapper must not return null; signal completion instead.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        // Only the leading expectedData.length bytes are compared; trailing output is ignored.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
/**
 * Queries JSON input but requests CSV output; the two uploaded values must come back as one
 * comma-separated record.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // A Reactor flatMap mapper must not return null; signal completion instead.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
    // Upload a plain CSV, then query it with Arrow output serialization; the call must not throw.
    FileQueryDelimitedSerialization csvInput = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(csvInput, 32);
    FileQueryArrowField decimalField = new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL)
        .setName("Name")
        .setPrecision(4)
        .setScale(2);
    FileQueryArrowSerialization arrowOutput =
        new FileQueryArrowSerialization().setSchema(Collections.singletonList(decimalField));
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream sink = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, sink).setOutputSerialization(arrowOutput);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
// A query referencing a column ordinal that doesn't exist for the parsed rows surfaces non-fatal
// errors to the registered error consumer instead of failing the call.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Upload with '.' separators so parsing with ',' later yields fewer columns than expected.
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
// At least one "InvalidColumnOrdinal" error must have been delivered to the consumer.
assertTrue(receiver2.numErrors > 0);
});
}
// Parsing CSV content as JSON is unrecoverable: draining the result flux must raise a
// RuntimeException, while the response envelope itself still arrives.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
// The error surfaces only when the value flux is consumed.
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
/**
 * Verifies the progress consumer is invoked during a query and eventually reports the full file
 * size as scanned.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        // MockProgressReceiver is a static nested class of this test class; the fully-qualified
        // name was redundant.
        MockProgressReceiver mockReceiver = new MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    // A serialization type the client does not recognize must be rejected with an
    // IllegalArgumentException, whether it appears on the input or the output side.
    FileQuerySerialization unknownSerialization = new RandomOtherSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(input ? unknownSerialization : null)
            .setOutputSerialization(output ? unknownSerialization : null);
        StepVerifier.create(fc.queryWithResponse(options))
            .verifyError(IllegalArgumentException.class);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    // Arrow serialization is output-only; supplying it as *input* must fail fast client-side.
    FileQueryArrowSerialization arrowInput = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(arrowInput);
        StepVerifier.create(fc.queryWithResponse(options))
            .verifyError(IllegalArgumentException.class);
    });
}
// True when the targeted service version predates 2020-10-02; gates tests that need features
// introduced in that version (e.g. parquet serialization rejection).
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    // Parquet serialization is input-only; supplying it as *output* must fail fast client-side.
    FileQueryParquetSerialization parquetOutput = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setOutputSerialization(parquetOutput);
        StepVerifier.create(fc.queryWithResponse(options))
            .verifyError(IllegalArgumentException.class);
    });
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Querying a path that was never created should surface a DataLakeStorageException.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(
        () -> StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class));
}
// Query succeeds when all access conditions (lease, ETag match/none-match, modified-since
// bounds) are satisfied.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// setupPath* helpers resolve the sentinel values (RECEIVED_*) into real lease IDs / ETags.
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs {@code runnable} directly in playback/record mode; in live mode, retries it up to five
 * times with a 5-second pause between attempts to paper over transient service flakiness.
 *
 * @throws RuntimeException the last failure, when all five live-mode attempts fail. (Previously
 *     the final failure was swallowed, letting a consistently-failing test pass silently.)
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastError = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) { // Runnable.run cannot throw checked exceptions.
            lastError = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastError;
}
// Query fails with a storage error when any access condition (lease, ETag, modified-since
// bounds) is violated.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Acquire a real lease (if requested) but deliberately send the unresolved/garbage value.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
// Applies the given deletion-scheduling options to a fresh file and checks whether an expiry
// time was recorded on it.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Deletion-scheduling options paired with whether an expiry is expected to be set on the file.
private static Stream<Arguments> scheduleDeletionSupplier() {
    FileScheduleDeletionOptions relativeToCreation =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME);
    FileScheduleDeletionOptions relativeToNow =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW);
    return Stream.of(
        Arguments.of(relativeToCreation, true),
        Arguments.of(relativeToNow, true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false));
}
// True when the targeted service version predates 2019-12-12; gates query and
// deletion-scheduling tests that require that version.
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// Scheduling deletion at an absolute time sets the expiry; the comparison truncates to whole
// seconds to match the service's stored precision.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
    // Scheduling deletion on a file that was never created must fail with a storage error.
    FileScheduleDeletionOptions options =
        new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient missingFile =
        dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(missingFile.scheduleDeletionWithResponse(options))
        .verifyError(DataLakeStorageException.class);
}
/** Records every bytes-scanned value reported by a query so tests can assert on progress. */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    // final: the list reference never changes; callers only read/iterate it.
    final List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Asserts every received query error is non-fatal and carries the expected error name, counting
 * how many were delivered.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    final String expectedType; // Never reassigned after construction.
    int numErrors;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// Stand-in FileQuerySerialization implementation unknown to the client, used to drive the
// IllegalArgumentException paths in queryInputOutputIA.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
    // Without the overwrite flag, uploading over an existing file is rejected client-side.
    StepVerifier
        .create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
// Upload with overwrite=true replaces the existing file; a read-back must return exactly the
// uploaded bytes.
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
// Fold every ByteBuffer from the read into one stream for comparison.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: data size, single-upload threshold, block size, expected append count.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    int justUnderThreshold = (100 * Constants.MB) - 1;
    int justOverThreshold = (100 * Constants.MB) + 1;
    int expectedChunks = (int) Math.ceil((double) justOverThreshold / (double) (4 * Constants.MB));
    return Stream.of(
        Arguments.of(justUnderThreshold, null, null, 1),
        Arguments.of(justOverThreshold, null, null, expectedChunks),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5));
}
// The upload response carries path info; at minimum an ETag must be present on the result.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call pipeline policy that overrides the service version header must apply to every
// request the client makes.
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
// Both getProperties and getAccessControl responses should echo the overridden x-ms-version.
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// Verifies create honors PathHttpHeaders supplied via DataLakePathCreateOptions (201 on success).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType)
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Verifies metadata set at create time via options round-trips through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
// Service may add its own metadata entries, so only check containment of what was set.
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Verifies permissions 0777 masked by umask 0057 result in effective permissions rwx-w----.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Verifies create succeeds when a proposed lease id and duration are supplied together.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id without a lease duration is rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies a lease acquired at create time is reported as locked/leased/fixed by getProperties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Verifies create accepts an absolute expiry time (or no schedule-deletion options at all).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an absolute expiry one day out, plus a null case, for createOptionsWithTimeExpiresOn.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// Verifies a relative time-to-expire is applied as creationTime + duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a fresh path creates the file.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// createIfNotExistsWithResponse with default options returns 201 and standard headers.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Second createIfNotExists on an existing path reports 409 instead of overwriting.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// Sanity check: the file exists after createIfNotExists.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// Verifies HTTP headers supplied to createIfNotExists round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// The service defaults an unset content type to application/octet-stream.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
// Verifies metadata supplied to createIfNotExists round-trips exactly through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Creates a file with an encryption context via createIfNotExists and verifies the context is
// surfaced by getProperties, by readWithResponse's deserialized headers, and by listPaths.
// FIX: the original declared @Test twice on this method (once before and once after @DisabledIf).
// JUnit 5's @Test is not @Repeatable, so the duplicate annotation is a compile error; it is
// removed here, matching the @DisabledIf-then-@Test order used by the sibling tests.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
// A directory is created alongside the file so listPaths returns it first (skipped below).
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// expectNextCount(1) skips the directory entry; the next path is the file under test.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// Verifies an ACL supplied to createIfNotExists is applied (first two entries checked).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Verifies owner and group supplied to createIfNotExists are applied.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Null owner/group fall back to the service default "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// createIfNotExists accepts PathHttpHeaders and returns 201.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata supplied to createIfNotExists is present on the created file.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
// Containment check: the service may add its own metadata entries.
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 result in effective permissions rwx-w----.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// createIfNotExists succeeds when proposed lease id and duration are supplied together.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id without a lease duration is rejected by the service.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A lease acquired by createIfNotExists is reported as locked/leased/fixed.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// createIfNotExists accepts an absolute expiry time (or null schedule-deletion options).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative time-to-expire via createIfNotExists is applied as creationTime + duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Deleting an existing file returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After deletion, getProperties fails with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists returns true when the file exists.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExistsWithResponse returns 200 on an existing file.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties fails.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// A second deleteIfExists on the already-deleted file reports 404 without throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns a PathInfo carrying an ETag and last-modified time.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse returns 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a non-existent file fails.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns a PathInfo carrying an ETag and last-modified time.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse returns 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a non-existent file fails.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the service is older than the 2020-02-10 API version.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// setAccessControlRecursive on a single file changes exactly one file, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// updateAccessControlRecursive on a single file changes exactly one file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// removeAccessControlRecursive on a single file changes exactly one file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl returns non-null ACL, permissions, owner, and group.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// getAccessControlWithResponse (no UPN) returns 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControlWithResponse with userPrincipalNameReturned=true returns 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails when an access condition is violated; the garbage-lease case is
// skipped because getAccessControl does not evaluate lease conditions the same way.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustively checks the default property set of a freshly created file: basic headers,
// populated timestamps/ETag/size/content-type, absent optional headers, unlocked lease,
// no copy state, server-side encryption on, HOT tier, empty metadata, not a directory.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// getPropertiesWithResponse returns 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all matching access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when any access condition is violated.
// FIX (consistency): every sibling *ACFail test (deleteACFail, setPermissionsACFail,
// setAclACFail, setHttpHeadersACFail, setMetadataACFail) invokes setupPathLeaseCondition as a
// standalone statement and passes the raw invalid leaseID into setLeaseId. This test instead
// passed the helper's RETURN value — the pattern the passing-case tests use — which can resolve
// to a valid lease and defeat the intended failure condition. Aligned with the siblings.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// getProperties on a non-existent file fails with BlobNotFound.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// setHttpHeaders(null) clears headers and returns 200 with standard headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Overriding only the content type (other headers copied from current properties) sticks.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Uploads the default test data, sets a full header set, and validates it via getProperties.
// FIX: the original called fc.append(...) and fc.flush(...) without .block(). Those methods
// return cold reactive publishers that do nothing until subscribed, so no data was ever
// uploaded and the content-MD5 assertion ran against an empty file. The sibling test
// readAllNull performs the identical append/flush sequence WITH .block(); this now matches it.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
fc.setHttpHeaders(putHeaders).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
contentMD5, contentType))
.verifyComplete();
}
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting metadata on a path that was never created must surface DataLakeStorageException.
@Test
public void setMetadataError() {
    String missingPath = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(missingPath);
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Uploads the default payload, then reads with all-null options and checks the full
// header contract of a plain read: mandatory headers present, copy/lease/range
// headers absent, and the body matching the uploaded bytes.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
// No metadata was set, so no x-ms-meta-* headers may appear.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
// Freshly created path: available/unlocked lease, byte-range support advertised.
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
}
// Reading a freshly created, empty file yields a zero-length buffer.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Verifies that download retries re-request the not-yet-received range: the mock
// policy asserts the retry uses "bytes=2-6" and then injects an IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.verifyError(IOException.class);
}
// Minimal read round-trip: upload the default payload and read it back unchanged.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Verifies that ranged reads return exactly the requested slice of the uploaded data.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    // A null count means "read from offset to the end of the file".
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Fix: removed the unused local ByteArrayOutputStream (dead code).
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Cases for readRange: full read (null count), prefix read, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// read must succeed (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read must fail when any access condition is violated (inverted setup vs. readAC).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// A ranged read with rangeGetContentMd5=true must return a Content-MD5 header whose
// value is the Base64-encoded MD5 of exactly the requested range.
// NOTE(review): the declared `throws NoSuchAlgorithmException` is never thrown here —
// the digest call is inside the lambda, which wraps it in RuntimeException instead.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Uploads data, then reads it through a pipeline that fails transiently (5 injected
// failures), verifying the default download retry logic recovers the full content.
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // Fix: removed the unused local ByteArrayOutputStream (dead code).
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// readToFile without overwrite must fail with FileAlreadyExistsException (wrapped in
// UncheckedIOException) when the local destination file already exists.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // Fix: the original built the append/flush Monos without subscribing, so nothing
    // was ever uploaded; block() actually executes them, matching the sibling tests.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true succeeds even though the destination already exists,
// and the downloaded bytes match the uploaded payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFile creates the destination when it does not exist and writes the payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
// Ensure the "does not exist" precondition.
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// With StandardOpenOption.CREATE among the open options, readToFileWithResponse must
// create the destination file when it does not already exist and write the payload.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    // Fix: this test exercises the "destination does not exist" path, so any leftover
    // file is removed. The original created the file instead — a copy-paste of the
    // sibling downloadFileExistOpenOptions setup, contradicting the test's name.
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// With CREATE + TRUNCATE_EXISTING open options, downloading over an existing local
// file truncates it first and writes the payload.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File sizes for download tests: tiny, block-aligned, and an awkward odd size.
// NOTE(review): `8 * 1026 * 1024 + 10` (not 1024*1024) looks deliberate — an
// intentionally misaligned size — but confirm it is not a typo for 1024.
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads various byte ranges to a local file and compares against the source slice.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges for downloadFileRange: full, skip-first-byte, interior slice, all-but-last
// byte, and a count larger than the file (server clamps to actual size).
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range starting past the end of the file must fail with DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with offset 0 and no count downloads the whole file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFile must succeed when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFile must fail with ConditionNotMet or LeaseIdMismatchWithBlobOperation when
// any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported byte count
// so downloadFileProgressReceiver can assert on the sequence.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Read directly by the test after the download completes.
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every reported progress value so
// downloadFileProgressListener can assert on the sequence.
private static final class MockProgressListener implements ProgressListener {
// Read directly by the test after the download completes.
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename: a bare renameWithResponse call should return 201 Created.
@Test
public void renameMin() {
    String destinationPath = generatePathName();
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, destinationPath, null, null, null), 201);
}
// After a rename, the destination client works (getProperties returns 200) and the
// original path no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming across file systems moves the path; the source then reports not-found.
// NOTE(review): `createFileSystem(...).block()` can return null per the Mono contract,
// which would NPE on getFileSystemName() — consider blockOptional().orElseThrow().
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming a path that was never created must surface DataLakeStorageException.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename must handle percent-encoded characters in the source and/or destination name.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
// Rename must succeed (201) when all source-side access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename must fail when any source-side access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename over an existing destination must succeed (201) when all destination-side
// access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename must fail when any destination-side access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename authenticated via a file-system SAS token (read/move/write/create/add/delete)
// must succeed, and the destination must be readable afterwards.
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken, but the SAS is passed with a leading '?' — the client must
// tolerate the query-string prefix.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Minimal append: appending the default payload at offset 0 must not error.
@Test
public void appendDataMin() {
    BinaryData payload = DATA.getDefaultBinaryData();
    assertDoesNotThrow(() -> fc.append(payload, 0).block());
}
// append returns 202 with request-id, version, date, and server-encrypted headers set.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// append with a correct transactional MD5 of the payload must be accepted (202).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null body or a declared length that disagrees with the actual data must fail with
// the specific exception type from the supplier.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null flux -> NPE; declared length off by +/-1 -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// A null body fails client-side with NullPointerException before any request is sent.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// append succeeds (202) when the correct active lease id is supplied.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// append with a wrong lease id on a leased path fails with 412 Precondition Failed.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf condition: true when the targeted service version predates 2020-08-04
// (the version that introduced lease actions on append/flush).
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE during append must leave the path locked/leased with a fixed
// duration. Requires service version 2020-08-04+.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW with an existing lease must keep the path locked/leased.
// Requires service version 2020-08-04+.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE with flush=true must leave the path unlocked/available.
// Fix: added the @DisabledIf service-version guard carried by every other
// LeaseAction test — lease actions require service version 2020-08-04+.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE with flush=true acquires for the operation and releases
// afterwards, leaving the path unlocked/available. Requires 2020-08-04+.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created fails with a 404 wrapped in
// DataLakeStorageException.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(404, e.getResponse().getStatusCode());
        });
}
// A pipeline policy injects a transient failure on the first attempt; the
// append should be retried transparently and the flushed content must still
// read back byte-for-byte.
@Test
public void appendDataRetryOnTransientFailure() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Append with flush=true commits the data in the same call: the 202 response
// carries the standard headers and the content is immediately readable.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Minimal happy path: appending BinaryData at offset 0 completes without error.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse with BinaryData returns 202 and the standard
// request/version/date/server-encrypted headers.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Same as appendBinaryData but with flush=true; only the response/headers are
// validated here.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Minimal happy path: flushing previously appended data with overwrite=true
// completes without error.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// flushWithResponse(retainUncommittedData=false, close=true) succeeds on a
// freshly created file with one appended chunk.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
// flushWithResponse(retainUncommittedData=true, close=false) succeeds on a
// freshly created file with one appended chunk.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
// Flushing at a position (4) that does not match the appended length is
// rejected by the service with DataLakeStorageException.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
        null))
        .verifyError(DataLakeStorageException.class);
}
// HTTP content headers supplied at flush time are persisted on the path.
// A null contentType is expected to default to "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    // Effectively-final copy needed for capture in the lambda below.
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
// Flush with every access condition (lease, ETag match/none-match,
// modified-since bounds) satisfied must succeed with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
// Flush with a failing access condition (stale lease, wrong ETag, or
// out-of-range modification time) must be rejected with
// DataLakeStorageException.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Use getDefaultDataSizeLong() for consistency with every other
    // flushWithResponse call in this class (was getDefaultDataSize(), which
    // relied on implicit int->long widening).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created fails with DataLakeStorageException.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true))
        .verifyError(DataLakeStorageException.class);
}
// First flush with overwrite=true succeeds; a second flush with
// overwrite=false against the now-committed file must fail.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// Path names (including URL-encoded and non-ASCII forms) are normalized by the
// client builder so getFilePath() returns the decoded name.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, client.getFilePath());
}
// Bearer-token credentials require HTTPS; building a client against an http://
// endpoint must throw IllegalArgumentException.
@Test
public void builderBearerTokenValidation() {
    String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(endpoint)
        .buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Test cases for uploadFromFile: (file size in bytes, optional block size).
// A null block size exercises the client's default chunking.
private static Stream<Arguments> uploadFromFileSupplier() {
    return Arrays.asList(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
    ).stream();
}
// Metadata supplied to uploadFromFile is persisted on the path, and the
// uploaded bytes read back identical to the local file's contents.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Checked IOException cannot escape the lambda; rethrow unchecked.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// uploadFromFile without an overwrite flag must fail against an existing file
// (both for the shared fc client and a freshly created file).
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// uploadFromFile with overwrite=true succeeds against existing files.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
 * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
 * read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Last cumulative byte count observed; progress callbacks are cumulative,
    // so this ends up equal to the total bytes transferred.
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
// Non-deprecated counterpart of FileUploadReporter: records the latest
// cumulative byte count reported through the ProgressListener API.
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Progress-tracking test cases: (total size, block size, buffer count).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// uploadFromFile honors single-upload/block-size thresholds; the resulting
// file size must equal the local data size either way.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// (data size, max single-upload size, optional block size): data larger than
// the single-upload threshold exercises the chunked path.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Arrays.asList(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    ).stream();
}
// uploadFromFileWithResponse returns 200 with ETag/Last-Modified populated,
// and the resulting file size matches the local data size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Buffer triples with an empty buffer in each position (or none), paired with
// the bytes expected after upload — empty buffers contribute nothing.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    ByteBuffer spaceBuffer = ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8));
    return Arrays.asList(
        Arguments.of(ByteBuffer.wrap(helloBytes), spaceBuffer, ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// Buffered-upload matrix: (data size, block size, max concurrency).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Arrays.asList(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    ).stream();
}
// Asserts that `result` is exactly the concatenation of `buffers`: walks the
// result with a sliding [position, limit) window sized to each expected
// buffer, and finally checks nothing is left over.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer expected : buffers) {
        expected.position(0);
        // Window the result to this buffer's span before comparing.
        result.limit(result.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, result);
        // Advance past the span just compared (remaining() re-read on purpose,
        // in case the comparison consumed the expected buffer).
        result.position(result.position() + expected.remaining());
    }
    assertEquals(0, result.remaining());
}
// Deprecated ProgressReceiver that counts how many progress callbacks arrive,
// asserting each cumulative count is block-aligned. `reportingCount` is read
// directly by the buffered-upload tests, so its name is part of the contract.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;

    Reporter(long expectedBlockSize) {
        this.blockSize = expectedBlockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Non-deprecated counterpart of Reporter using the ProgressListener API; same
// block-alignment assertion and callback counting.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;

    Listener(long expectedBlockSize) {
        this.blockSize = expectedBlockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Buffered-upload progress cases: (total size, block size, buffer count).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// (per-buffer sizes in MB, block size in MB, concurrency) — covers buffers
// smaller than, equal to, and larger than the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Buffer-size lists around the 4 MB single-shot boundary (below, just above,
// exactly at, and a single buffer at the boundary).
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same boundary cases as bufferedUploadHandlePathingSupplier, minus the
// single-buffer case.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// Uploading a null data flux must fail with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Header cases: (size, cacheControl, disposition, encoding, language,
// validate MD5?, contentType) for single-shot and multi-part sizes.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
@Test
// Buffered upload with POSIX permissions/umask set; only status code and size are asserted
// (the effective ACL is not inspected here).
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
@Test
// Round-trips a file through a non-markable (non-replayable) Flux source and verifies the
// downloaded content matches byte-for-byte.
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
@Test
// Uploading from an InputStream without specifying a length still round-trips the data.
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
// A declared length that disagrees with the stream's actual size must fail the upload.
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Lengths that disagree with the default payload: zero, negative, one byte short, one byte long.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
@Test
// Injects a transient failure into the pipeline and verifies the upload retries to success
// and the data round-trips intact.
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
// Upload via the BinaryData overload round-trips the default payload.
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
// BinaryData upload with overwrite=true over the existing file round-trips the payload.
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
// Encryption context set at upload time is returned by getProperties (2021-04-10+ only).
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload (optional header row + numCopies copies of two fixed records) using
// the serialization's record/column separators, then creates/appends/flushes it into fc.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    byte[] headers = header.getBytes();
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Each copy is written immediately after the (optional) header block.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Writes a small JSON object with numCopies "name<i>": "owner<i>" entries into fc.
// Note: the object keeps a trailing comma before '}', which the query tests tolerate.
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int entry = 0; entry < numCopies; entry++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", entry, entry));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    fc.flush(json.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    1,
    32,
    256,
    400,
    4000
})
// SELECT * over a CSV file of varying size must return exactly the original bytes.
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate the query output chunks into one stream for comparison.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
// Query round-trip across record/column separator combinations. When headers are present on
// input but stripped on output, the result is the source minus the 16-byte header row.
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Header-presence combinations for the default separators, followed by a sweep of unusual
// record separators and column separators (headers disabled). Order matters for recordings.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    Stream<Arguments> headerCombos = Stream.of(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false));
    Stream<Arguments> separatorSweep = Stream.of(
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false));
    return Stream.concat(headerCombos, separatorSweep);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Query round-trip with an escape character and field quote configured on both sides.
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
// SELECT * over a JSON document; the service appends a trailing record separator, so the
// expected bytes are the source plus one '\n' (byte 10).
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = new ByteArrayOutputStream();
    FluxUtil.writeToOutputStream(fc.read(), readData).block();
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// 0, 10, 100 and 1000 JSON entries, all newline-delimited.
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(0, 10, 100, 1000).map(copies -> Arguments.of(copies, '\n'));
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, JSON out: the first CSV record must serialize to the expected JSON object prefix.
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // Only the prefix is compared; the service output continues past the first record.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// JSON in, CSV out: the two-entry JSON object flattens to a single CSV record.
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, Arrow out with a single decimal column schema; only asserts the query succeeds
// (Arrow output bytes are not compared).
public void queryInputCsvOutputArrow() {
    FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(inSer, 32);
    List<FileQueryArrowField> schema = Collections.singletonList(
        new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
    FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Uploads with '.' as the column separator but queries with ',' so every record raises a
// non-fatal InvalidColumnOrdinal error; the error consumer must observe at least one.
// NOTE: `base` is mutated in place (setColumnSeparator) between upload and query on purpose.
public void queryNonFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver2)).block().getValue().blockLast());
        assertTrue(receiver2.numErrors > 0);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Declaring JSON input serialization for a CSV file is a fatal error: the response arrives
// but consuming its value stream must throw.
public void queryFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(true);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
            new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
            .assertNext(r -> {
                assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
            })
            .verifyComplete();
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// The progress consumer must eventually report the full file size as bytes scanned.
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
// An unrecognized FileQuerySerialization implementation on either side must be rejected
// client-side with IllegalArgumentException.
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Arrow is output-only: using it as input serialization must error.
public void queryArrowInputIA() {
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
// Gate for features introduced in service version 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
// Parquet is input-only: using it as output serialization must error.
public void queryParquetOutputIA() {
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Querying a file that was never created must surface a service error.
public void queryError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Query succeeds when all supplied access conditions (lease, ETag, dates) match.
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
// Runs a test scenario, retrying up to 5 times in live mode to absorb transient service
// errors. In playback/record mode the scenario runs exactly once.
// Fix: the original swallowed the exception after the final retry, so a scenario that
// failed all 5 attempts still passed silently; the last failure is now rethrown.
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    Exception last = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (Exception ex) {
            last = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    // Runnable.run can only throw unchecked exceptions, but wrap defensively.
    throw last instanceof RuntimeException ? (RuntimeException) last : new RuntimeException(last);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Query fails when an access condition does not match the file's state.
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    StepVerifier.create(fc.queryWithResponse(
        new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
// Scheduling deletion sets an expiry only for options that carry one (see supplier).
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Relative expiry (from creation time / from now) produces an expiry; empty options or a
// null options object do not.
private static Stream<Arguments> scheduleDeletionSupplier() {
    FileScheduleDeletionOptions fromCreation =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME);
    FileScheduleDeletionOptions fromNow =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW);
    return Stream.of(
        Arguments.of(fromCreation, true),
        Arguments.of(fromNow, true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false));
}
// Gate for features introduced in service version 2019-12-12 (quick query, expiry).
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// An absolute expiry time is stored truncated to whole seconds by the service.
public void scheduleDeletionTime() {
    OffsetDateTime now = testResourceNamer.now();
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
// Scheduling deletion on a file that does not exist must surface a service error.
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
// Records every bytes-scanned value reported during a query, in callback order.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    List<Long> progressList = new ArrayList<>();
    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
// Counts non-fatal query errors, asserting each one matches the expected error name.
static class MockErrorReceiver implements Consumer<FileQueryError> {
    String expectedType;
    int numErrors;
    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }
    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// Deliberately unrecognized serialization type, used to provoke IllegalArgumentException
// in queryInputOutputIA.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
// Without the overwrite flag, uploading over the existing file (created in setup) fails.
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
@Test
// With overwrite=true the upload replaces the existing file and the data round-trips.
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// (dataSize, singleUploadSize, blockSize, expected number of appends):
// just under / over the 100 MB single-shot limit, then explicit threshold/block sizes.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    return Stream.of(
        Arguments.of((100 * Constants.MB) - 1, null, null, 1),
        // Over the limit: chunked into ceil(size / 4 MB default block size) appends.
        Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5)
    );
}
@SuppressWarnings("deprecation")
@Test
// The upload response carries path info including a non-null ETag.
public void uploadReturnValue() {
    assertNotNull(fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
        .getValue().getETag());
}
@Test
// A per-call pipeline policy overriding the service version must apply to both the blob
// endpoint (getProperties) and the dfs endpoint (getAccessControl).
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
}

Review note: use `testResourceNamer.randomUuid()` instead of `CoreUtils.randomUuid()` so the test recording captures the UUID.

Before:
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

Target line: String leaseId = CoreUtils.randomUuid().toString();

After:
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

class FileAsyncApiTests extends DataLakeTestBase {
// File client created fresh for each test in setup().
private DataLakeFileAsyncClient fc;
// Temp files registered here are deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwx for owner, r-x for group, r-- for other.
private static final PathPermissions PERMISSIONS = new PathPermissions()
    .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
    .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
    .setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
    PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
// Creates a fresh file under a generated path before each test.
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
// Best-effort deletion of temp files created during the test.
public void cleanup() {
    createdFiles.forEach(File::delete);
}
@Test
// Minimal create: the emitted PathInfo must be non-null.
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        // Idiom fix: assertNotNull instead of assertNotEquals(null, ...) for clearer intent
        // and failure messages.
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
}
@Test
// Create with all-default parameters returns 201 and the standard response headers.
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
// HTTP headers supplied at create must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// When no content type is set the service reports the default octet-stream type.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
// Metadata supplied at create must be returned verbatim by getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Guard for @DisabledIf: true when the targeted service version predates 2021-04-10
// (the version that introduced encryption-context support used by tests below).
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    // Use a dedicated file system so listPaths below only sees the paths created here.
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();
    // The encryption context must round-trip through getProperties, read, and listPaths.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // Bug fix: this StepVerifier previously had no terminal verifyComplete(), so it
    // never subscribed and the read-path assertion was never executed.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// Create succeeds (201) when each access condition is individually satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Satisfiable condition combinations: one non-null condition per row.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
// Create fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Unsatisfiable condition combinations: one violated condition per row.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
// Create with explicit octal permissions and umask returns 201.
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
// An ACL supplied via create options must be readable back; the first two
// (user and group) entries are verified.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner/group set at create must round-trip through getAccessControl.
// Random UUIDs come from testResourceNamer so recordings stay deterministic.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // Bug fix: the create call is a cold Mono and was never subscribed (missing
    // .block()), so the request was never actually sent before asserting.
    fc.createWithResponse(null, null).block();
    // With no owner/group supplied, the service assigns "$superuser" to both.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// Create with explicit HTTP headers via options returns 201.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Every metadata pair supplied via options must be present on the created file.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 must read back as rwx-w----.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
    // Use testResourceNamer so the UUID is captured in test recordings and
    // playback stays deterministic (matches createOptionsWithOwnerAndGroup).
    String leaseId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
    // testResourceNamer so the UUID is captured in recordings for playback.
    String leaseId = testResourceNamer.randomUuid();
    // Proposing a lease id without a lease duration is expected to fail.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
    // testResourceNamer so the UUID is captured in recordings for playback.
    String leaseId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    // A fixed 15-second lease must be reported as locked / leased / fixed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// Create with an absolute (or absent) scheduled-deletion time returns 201.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an absolute expiry one day out, and null (no scheduled deletion).
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// A relative time-to-expire must be computed from the creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a new path makes the file exist.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options return 201 with the standard headers.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext( r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// First call creates (201); second call on the same path reports a conflict (409).
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// After createIfNotExists, exists() must report true.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null, null, null, null, null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    // Consistency fix: spacing normalized (around '=' and after commas) to match
    // the sibling createHeaders test; behavior unchanged.
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // When no content type is set the service reports the default octet-stream type.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    // Consistency fix: spacing normalized to match the sibling createMetadata test;
    // behavior unchanged.
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
    // Metadata supplied at create must be returned verbatim by getProperties.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Create with explicit octal permissions/umask returns 201.
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
        .setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    // Use a dedicated file system so listPaths below only sees the paths created here.
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();
    // The encryption context must round-trip through getProperties, read, and listPaths.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // Bug fix: this StepVerifier previously had no terminal verifyComplete(), so it
    // never subscribed and the read-path assertion was never executed.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    // Consistency fix: spacing normalized ("fc ="); behavior unchanged.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createIfNotExistsWithResponse(options, null).block();
    // The first two (user and group) ACL entries must round-trip.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}
// Owner/group set at createIfNotExists must round-trip through getAccessControl.
// Random UUIDs come from testResourceNamer so recordings stay deterministic.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// With null owner/group the service assigns "$superuser" to both.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // Consistency fix: spacing normalized to match createOptionsWithPathHttpHeaders;
    // behavior unchanged. Create with explicit HTTP headers returns 201.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // Every metadata pair supplied at creation must be present on the created file.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
// Permissions 0777 masked by umask 0057 must read back as rwx-w----.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    // Bug fix: @Test was declared twice; @Test is not a repeatable annotation, so
    // the duplicate was a compile error. Also normalized spacing and switched the
    // UUID to testResourceNamer so it is captured in test recordings.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = testResourceNamer.randomUuid();
    // Proposing a lease id without a lease duration is expected to fail.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // testResourceNamer so the UUID is captured in recordings for playback.
    String leaseId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // A fixed 15-second lease must be reported as locked / leased / fixed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// createIfNotExists with an absolute (or absent) scheduled-deletion time returns 201.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative time-to-expire must be computed from the creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Plain delete of the setup file returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After delete, getProperties reports 404 / BLOB_NOT_FOUND.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null,null,null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when each access condition is individually satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match,String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file reports true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExists with default options returns 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties fails.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on the same path reports 404 instead of throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns a response carrying an ETag and last-modified time.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse returns 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a non-existent file fails with a storage exception.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns a response carrying an ETag and last-modified time.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r ->{
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse returns 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a non-existent file fails with a storage exception.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-02-10
// (required by the recursive ACL operations below).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive set on a single file: exactly one file changed, nothing failed.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive update on a single file: exactly one file changed, nothing failed.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive remove on a single file: exactly one file changed, nothing failed.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl with every matching access condition (lease, etag, modified-since) should succeed.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl with a deliberately failing access condition must raise DataLakeStorageException.
// The garbage-lease case is skipped: a bogus lease id is not a condition the service evaluates here.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustive check of the default property set on a freshly created file: standard headers are
// present, optional/copy/lease metadata is absent, and defaults (HOT tier, unlocked lease) hold.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// Minimal getProperties call should return HTTP 200.
@Test
public void getPropertiesMin() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// getProperties with every matching access condition should succeed with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties with a failing access condition must raise DataLakeStorageException.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// Reading properties of a path that was never created must surface a BlobNotFound error.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException
                = assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageException.getMessage().contains("BlobNotFound"));
        });
}
// Setting null HTTP headers is a valid no-op that still returns 200 with standard headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Changing only content-type (other headers copied from current properties) should round-trip.
// NOTE(review): declared "throws NoSuchAlgorithmException" is required by MessageDigest.getInstance.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Each supplied HTTP header combination should be stored and observable via getProperties.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // Bug fix: the append/flush Monos were never subscribed (missing .block()),
    // so the file content was never actually written before the assertion.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType))
        .verifyComplete();
}
// Supplies one all-null row (clears headers) and one fully populated row for setHTTPHeadersHeaders.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
// setHttpHeaders with every matching access condition should succeed with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders with a failing access condition must raise DataLakeStorageException.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting headers on a nonexistent path must fail with DataLakeStorageException.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .expectError(DataLakeStorageException.class)
        .verify();
}
// Minimal setMetadata call: a single key/value pair should round-trip through getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata with zero or two pairs: response code matches and the stored map round-trips.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata with every matching access condition should succeed with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata with a failing access condition must raise DataLakeStorageException.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting metadata on a nonexistent path must fail with DataLakeStorageException.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .expectError(DataLakeStorageException.class)
        .verify();
}
// Read with all-default options: body matches what was uploaded and the response headers carry
// the expected presence/absence pattern for a plain, unleased, uncopied file.
// NOTE(review): the inner subscribe() runs asynchronously, so an assertion failure inside it may
// not fail the test — consider collecting the Flux and asserting synchronously; confirm intent.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
// Reading a freshly created zero-length file should yield an empty buffer, not an error.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// A retried ranged read whose retry uses a mismatched Range header (enforced by
// MockRetryRangeResponsePolicy) must surface an IOException on the body stream.
// NOTE(review): the commented-out alternative expectation below was left by a previous
// author — confirm whether it should be deleted or restored.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
/*StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.verifyErrorSatisfies(r -> {
RuntimeException e = assertInstanceOf(RuntimeException.class, r);
assertInstanceOf(IOException.class, e.getCause());
});*/
}
// Minimal read: the full uploaded payload comes back byte-for-byte.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Ranged read: offset/count (count null = to end) should return exactly the expected substring.
// NOTE(review): the assertion lives inside an async subscribe(); a failure there may not
// propagate to the test — consider collecting synchronously; confirm intent.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Ranges for readRange: whole payload, prefix, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// read with every matching access condition should succeed with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read with a failing access condition must raise DataLakeStorageException.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Ranged read with MD5 requested: the Content-MD5 header must equal the Base64 MD5
// of the requested 3-byte slice.
// NOTE(review): the method-level "throws NoSuchAlgorithmException" is unused — the
// checked exception is caught and rethrown inside the lambda; could be dropped.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// With a policy that injects 5 transient failures, the client's default retry behavior
// must still deliver the complete payload.
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(failureFileAsyncClient.read())
.assertNext(r -> {
try {
downloadData.write(r.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
// readToFile without overwrite must fail with FileAlreadyExistsException (wrapped in
// UncheckedIOException) when the destination file already exists on disk.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // Bug fix: the append/flush Monos were never subscribed (missing .block()),
    // so the upload setup never executed; block() matches the sibling tests.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true must replace an existing destination file with the payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile must create the destination file when it does not exist and write the payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile with explicit CREATE/READ/WRITE open options should succeed and write the payload.
// NOTE(review): despite the name, this test *creates* the destination file first — the
// name/behavior mismatch (compare downloadFileExistOpenOptions) looks worth confirming.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile with CREATE + TRUNCATE_EXISTING open options should overwrite an existing file.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Parallel download of various file sizes to disk: content must match the uploaded file and the
// response must report the correct size.
// NOTE(review): the @EnabledIf string below appears truncated (unterminated literal) — likely
// extraction damage; confirm against the original source (e.g. "...#isLiveMode").
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file,outFile,0,fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes for download tests: tiny, block-aligned, just-over-block, and large (50 MB).
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
// Same as downloadFile but through a freshly built async service client/file system,
// exercising the async buffer-copy path end to end.
// NOTE(review): @EnabledIf string appears truncated — see note on downloadFile.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged download to disk: only the requested slice is written, matching the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges for downloadFileRange: full, offset, interior slice, truncated, and over-long count.
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range starting past the end of the blob must fail with DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with no count (offset 0, open-ended) must download the whole file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFile with every matching access condition should not throw.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFile with a failing access condition must raise ConditionNotMet or
// LeaseIdMismatchWithBlobOperation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// The parallel download pins the blob's etag: a policy re-uploads the blob after the first
// response, so a later chunk must fail with 412 and the partial output file must be removed.
// NOTE(review): @EnabledIf string appears truncated — see note on downloadFile.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
// Deprecated ProgressReceiver path: progress must end at fileSize, never exceed it,
// and be monotonically non-decreasing.
// NOTE(review): @EnabledIf string appears truncated — see note on downloadFile.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported byte count.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// Non-deprecated ProgressListener path: same invariants as downloadFileProgressReceiver.
// NOTE(review): @EnabledIf string appears truncated — see note on downloadFile.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every reported progress value.
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename (same file system, new name) should return HTTP 201.
@Test
public void renameMin() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .assertNext(response -> assertEquals(201, response.getStatusCode()))
        .verifyComplete();
}
// After rename, the returned client must resolve the new path (200) and the old path must 404.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Rename across file systems: the new location is readable, the old path errors out.
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming a nonexistent source path must fail with DataLakeStorageException.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .expectError(DataLakeStorageException.class)
        .verify();
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
    // Minimal append: default data at offset 0 should not throw.
    @Test
    public void appendDataMin() {
        assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
    }
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
    // Invalid append inputs (null stream or a declared size that disagrees with the stream)
    // must fail with the specific exception type from the supplier below.
    @ParameterizedTest
    @MethodSource("appendDataIllegalArgumentsSupplier")
    public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
        StepVerifier.create(fc.append(is, 0, dataSize))
            .verifyError(exceptionType);
    }
    // Cases: null body -> NPE; declared size one byte larger/smaller than the actual
    // payload -> UnexpectedLengthException.
    private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
        return Stream.of(
            Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
            Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
            Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
        );
    }
    // Appending a zero-length body is rejected by the service.
    @Test
    public void appendDataEmptyBody() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
            .verifyError(DataLakeStorageException.class);
    }
    // Appending a null Flux body with declared length 0 is rejected by the service.
    @Test
    public void appendDataNullBody() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.append(null, 0, 0))
            .verifyError(DataLakeStorageException.class);
    }
    // Append succeeds (202) when the correct active lease id is supplied.
    @Test
    public void appendDataLease() {
        assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
            null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
    }
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
    // JUnit condition helper: true when the targeted service version predates 2020-08-04
    // (used by @DisabledIf on tests that need lease actions on append/flush).
    private static boolean olderThan20200804ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_08_04);
    }
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
    // Minimal BinaryData append at offset 0 should not throw.
    @Test
    public void appendBinaryDataMin() {
        assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
    }
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
    // Minimal flush after a single append should not throw.
    @Test
    public void flushDataMin() {
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    }
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSize(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
    // Flushing a path that was never created surfaces a storage error.
    @Test
    public void flushError() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.flush(1, true))
            .verifyError(DataLakeStorageException.class);
    }
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
    // Building a client from a (possibly URL-encoded) file name should yield the decoded path.
    @ParameterizedTest
    @CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
        "%E6%96%91%E9%BB%9E,斑點"})
    public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
        DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
        assertEquals(finalFileName, client.getFilePath());
    }
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
    // File sizes from tiny (single shot) up to 101 MB with an explicit 4 MB block size.
    private static Stream<Arguments> uploadFromFileSupplier() {
        return Stream.of(
            Arguments.of(10, null),
            Arguments.of(10 * Constants.KB, null),
            Arguments.of(50 * Constants.MB, null),
            Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
        );
    }
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(fc.read())
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r.array());
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
private static final class FileUploadListener implements ProgressListener {
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
    // (total size, block size, concurrency) combinations covering single-block and many-block uploads.
    private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
        return Stream.of(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
        );
    }
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
    // 100-byte payload exceeding the 50-byte single-upload limit, with and without an explicit block size.
    private static Stream<Arguments> uploadFromFileOptionsSupplier() {
        return Stream.of(
            Arguments.of(100, 50L, null),
            Arguments.of(100, 50L, 20L)
        );
    }
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
return Stream.of(
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
    // (data size, block size, concurrency) combinations from a few large blocks to many small ones.
    private static Stream<Arguments> asyncBufferedUploadSupplier() {
        return Stream.of(
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
            Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
            Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
            Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
        );
    }
    // Asserts that `result` is exactly the concatenation of `buffers`: walks the result with a
    // sliding [position, limit) window sized to each chunk, compares, then advances past it.
    // NOTE(review): the window bookkeeping depends on assertByteBuffersEqual's handling of
    // positions — preserve statement order if editing.
    private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
        result.position(0);
        for (ByteBuffer buffer : buffers) {
            buffer.position(0);
            // Limit the comparison window to the current chunk's length.
            result.limit(result.position() + buffer.remaining());
            TestUtils.assertByteBuffersEqual(buffer, result);
            result.position(result.position() + buffer.remaining());
        }
        // Everything in result must have been consumed by the chunks.
        assertEquals(0, result.remaining());
    }
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
private static final class Listener implements ProgressListener {
private final long blockSize;
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
    // (total size, block size, concurrency) combinations from single-block to many small blocks.
    private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
        return Stream.of(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
        );
    }
// Same as bufferedUploadWithReporter, but using the non-deprecated ProgressListener API.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At least one progress callback per uploaded block.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a Flux of multiple ByteBuffers (chunked source) and verifies a read-back
// round trip matches the concatenation of the input buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
// bufferSize is in MB here (see supplier); converted to bytes for the block size.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Arguments: (per-chunk sizes in MB, buffer size in MB, number of buffers). Covers chunks
// below, equal to, and straddling the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
return Stream.of(
Arguments.of(Arrays.asList(7, 7), 10L, 2),
Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
Arguments.of(Arrays.asList(10, 10), 10L, 2),
Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
);
}
// Verifies upload correctly chooses between the single-shot and chunked code paths for
// various payload sizes (supplier spans both sides of the 4MB single-upload threshold).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot (publish().autoConnect()) source,
// which cannot be replayed/resubscribed — exercises single-subscription handling.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists spanning small payloads, just over 4MB, and exactly-4MB boundaries.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB), Collections.singletonList(4 * Constants.MB));
}
// Hot-flux upload through a pipeline that injects transient failures; verifies the
// retry logic still produces a byte-exact round trip.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Read back through a clean client (no failure injection) to validate the upload.
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists for the transient-failure hot-flux test (small, just-over-4MB, exactly-4MB).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
// InputStream-based upload with transient failures injected; verifies the read-back
// bytes match exactly. Data sizes cover both the single-shot and chunked paths.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
// NOTE(review): readData may be null if the read Flux is empty — block() result is
// dereferenced unchecked below; acceptable in a test, would NPE rather than assert.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Uploads with explicit HTTP headers (optionally an MD5 of the content) and verifies
// they round-trip via getProperties. A null content type defaults to
// application/octet-stream on the service side.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Header permutations over small (single-shot) and 6MB (chunked) payloads.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
return Stream.of(
Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
);
}
// Uploads with optional metadata key/value pairs and verifies they round-trip through
// getProperties (an empty map when all keys are null).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies that blockSize/maxSingleUploadSize options drive the expected number of
// append calls, by counting appendWithResponse invocations via an anonymous subclass.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Hand-rolled spy: intercepts appendWithResponse to count appends, then delegates.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Uploads with POSIX permissions/umask set on the options and verifies the upload
// succeeds with the expected file size.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Upload with access conditions (lease, ETag match, modified-since) that should all
// pass — expects a 200 response.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Upload with access conditions that should FAIL — expects a 412 Precondition Failed.
// Note IfMatch gets the raw (invalid) match while IfNoneMatch gets the real ETag,
// the inverse of bufferedUploadAC.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Uses a garbage lease id so the upload fails server-side; exercises buffer-pool
// cleanup when multiple buffers are in flight at failure time.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// By default upload must NOT overwrite an existing file — the second upload errors.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// Verifies that uploadFromFile with overwrite=true succeeds over existing content.
// FIXES: (1) fc is an async client, so the first uploadFromFile Mono was never
// subscribed (no block()) and the initial upload never ran; (2) the second
// getRandomFile(50) temp file was neither deleteOnExit'd nor added to createdFiles,
// leaking it past test cleanup.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // block() so the initial upload actually executes before the overwrite attempt.
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
// Uploads from a file-backed Flux (a non-replayable/non-markable source) and verifies
// a download-to-file round trip matches the original bytes.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Upload from an InputStream with no explicit length must succeed and round-trip.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// An explicit length that does not match the stream (zero, negative, off-by-one)
// must cause the upload to fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Bad lengths: zero, negative, one short of, and one past the real data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
// Upload through a failure-injecting pipeline must still succeed via retries, and the
// read-back content must match.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Upload from BinaryData (options overload) round-trips byte-exactly.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Upload from BinaryData with overwrite=true round-trips byte-exactly.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// The encryption context set at upload must be returned from getProperties
// (service version 2021-04-10+).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Quick Query fixture helper: builds a 4-column CSV (two data rows per copy, repeated
// numCopies times, optionally preceded by a header row per the serialization settings)
// and uploads it to fc via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
// Header is included only when the serialization declares headers present.
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
// block() each step so the fixture is fully written before the test queries it.
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Quick Query fixture helper: uploads a small JSON object of numCopies
// "nameN": "ownerN" entries to fc.
// BUG FIX: the original invoked create/append/flush without subscribing. These are
// lazy Reactor Monos ("nothing happens until you subscribe"), so no data was ever
// written — compare uploadCsv, which calls block() on each step.
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    b.append('}');
    // block() each step so the fixture is fully written before the test queries it.
    fc.create(true).block();
    fc.append(BinaryData.fromString(b.toString()), 0).block();
    fc.flush(b.length(), true).block();
}
// Minimal Quick Query: SELECT * over a headerless CSV must return the same bytes as a
// plain read of the file.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Quick Query over CSV with varying record/column separators and header settings;
// output must equal a raw read (minus the 16-byte header row when the input has
// headers but the output drops them).
// BUG FIX: both the inner Flux.flatMap and outer Mono.flatMap mappers returned null.
// Reactor mappers are null-hostile — a null return raises NullPointerException at
// runtime the moment an element is emitted. Return Mono.empty() instead, matching
// the pattern already used in queryCsvSerializationEscapeAndFieldQuote's inner mapper.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    // Mappers must not return null (Reactor null-hostility).
                    return Mono.empty();
                }).blockLast();
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Arguments: (recordSeparator, columnSeparator, headersPresentIn, headersPresentOut) —
// covers unusual separator characters and header in/out combinations.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Quick Query over CSV with escape char and field quote set; output must equal a raw read.
// BUG FIX: the outer Mono.flatMap mapper returned null — Reactor mappers are
// null-hostile and raise NullPointerException at runtime. Return Mono.empty(),
// consistent with the inner mapper which already did so.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // Mappers must not return null (Reactor null-hostility).
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Quick Query over JSON input; the query output must equal a raw read plus one
// trailing record separator.
// BUG FIX: the outer Mono.flatMap mapper returned null — Reactor mappers are
// null-hostile and raise NullPointerException at runtime. Return Mono.empty().
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // The service appends a trailing record separator ('\n' == 10) to the last record.
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // Mappers must not return null (Reactor null-hostility).
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Arguments: (number of JSON entries, record separator) — 0 through 1000 entries.
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
// Quick Query with CSV input serialized to JSON output; the first record must match
// the expected JSON projection of the first CSV row.
// BUG FIX: the outer Mono.flatMap mapper returned null — Reactor mappers are
// null-hostile and raise NullPointerException at runtime. Return Mono.empty().
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // Mappers must not return null (Reactor null-hostility).
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        // Only the leading expectedData.length bytes are compared; output may include
        // additional records/separators beyond the first row.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
// Quick Query with JSON input serialized to CSV output; expects the two owner values
// joined by the column separator.
// BUG FIX: the outer Mono.flatMap mapper returned null — Reactor mappers are
// null-hostile and raise NullPointerException at runtime. Return Mono.empty().
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // Mappers must not return null (Reactor null-hostility).
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
// Quick Query with Arrow output serialization must not throw; the Arrow payload
// itself is not validated here.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// A query referencing an out-of-range column ordinal produces non-fatal errors that
// are routed to the error consumer rather than failing the query.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Upload with '.' separator, then query with ',' so column parsing misbehaves.
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// Declaring JSON input serialization over CSV data is a fatal error: consuming the
// result stream must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// The progress consumer must eventually report the full file size as bytes scanned.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
// Over a large (512000-copy) fixture, progress reports must be monotonically
// non-decreasing.
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
// Check that progress reported monotonically increases.
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// JUnit condition helper: true when the targeted service version predates 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs {@code runnable} directly in playback/record mode; in live mode retries it up to 5 times,
 * sleeping 5s between attempts to ride out transient service flakiness.
 *
 * FIX: the original loop swallowed the exception once all 5 attempts failed, letting a broken
 * scenario pass silently. We now rethrow after the final attempt so the test actually fails.
 * Note: Errors (e.g. AssertionError) are not caught, matching the original behavior.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    Exception lastException = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return; // success
        } catch (Exception ex) {
            lastException = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    // All retries exhausted — propagate the last failure instead of hiding it.
    throw new RuntimeException("Live test scenario failed after 5 retries.", lastException);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
/**
 * Argument source for {@link #scheduleDeletion}: deletion options paired with whether an
 * expiry is expected afterwards. Empty options and a null options object set no expiry.
 */
private static Stream<Arguments> scheduleDeletionSupplier() {
    FileScheduleDeletionOptions relativeToCreation =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME);
    FileScheduleDeletionOptions relativeToNow =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW);

    return Stream.of(
        Arguments.of(relativeToCreation, true),
        Arguments.of(relativeToNow, true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false));
}
// JUnit condition helper: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Scheduling deletion on a file that was never created must fail.
@Test
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
/**
 * Test double that records every bytes-scanned value reported during a file query.
 * FIX: the list reference is never reassigned, so it is now {@code final} (kept package-visible
 * because tests read {@code progressList} directly).
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    final List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Test double that asserts every reported query error is non-fatal and of the expected
 * type, counting occurrences in {@code numErrors}.
 * FIX: {@code expectedType} is set once in the constructor, so it is now {@code final};
 * {@code numErrors} no longer needs an explicit zero assignment (field default).
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    final String expectedType;
    int numErrors;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// Deliberately unknown FileQuerySerialization implementation used to drive the
// IllegalArgumentException paths in queryInputOutputIA.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// Upload without overwrite=true onto an existing file (created in setup) must fail.
@Test
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
// Upload with overwrite=true succeeds and the content reads back byte-for-byte.
@Test
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    // Collect the read Flux<ByteBuffer> into a single output stream.
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
/**
 * Argument source for {@link #uploadNumAppends}:
 * (dataSize, maxSingleUploadSize, blockSize, expected append count).
 */
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    // Just under the default single-shot threshold -> exactly one append.
    int underThreshold = (100 * Constants.MB) - 1;
    // Just over the threshold -> chunked into default 4 MB blocks.
    int overThreshold = (100 * Constants.MB) + 1;
    int chunkedAppends =
        (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB));

    return Stream.of(
        Arguments.of(underThreshold, null, null, 1),
        Arguments.of(overThreshold, null, null, chunkedAppends),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5));
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call pipeline policy overriding the service version must apply to both blob- and
// dfs-endpoint operations (getProperties vs. getAccessControl).
@Test
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    // Policy pins x-ms-version to 2019-02-02 on every request.
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
}
class FileAsyncApiTests extends DataLakeTestBase {
// File client under test; re-created per test in setup().
private DataLakeFileAsyncClient fc;
// Local temp files created during tests; deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwxr-xr-- style permission set reused by permission tests.
private static final PathPermissions PERMISSIONS = new PathPermissions()
    .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
    .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
    .setOther(new RolePermissions().setReadPermission(true));
// Null means "do not set" group/owner in the calls that take them.
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
    PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
// Creates a fresh file for each test.
@BeforeEach
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
/** Minimal create: a brand-new path can be created and yields a non-null PathInfo. */
@Test
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    StepVerifier.create(fc.create())
        .assertNext(pathInfo -> assertNotEquals(null, pathInfo))
        .verifyComplete();
}
// Create with all-default arguments returns 201 and standard headers (ETag, Last-Modified, ...).
@Test
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// An unsatisfiable If-Match condition on create must fail with DataLakeStorageException.
@Test
public void createError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
        .verifyError(DataLakeStorageException.class);
}
// create(overwrite=false) on an existing file must fail.
@Test
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
// exists() is true for a created file.
@Test
public void exists() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
// exists() is false for a path that was never created.
@Test
public void doesNotExist() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
// HTTP headers supplied at create time must be returned by getProperties.
// FIX: @CsvSource lacked nullValues = "null", so the first row passed the literal string "null"
// for each header instead of actual nulls — defeating the (contentType == null) default check
// below. The sibling test createMetadata (and the original suite) uses nullValues = "null".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Service reports the default content type when none was set.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;

    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
// Metadata supplied at create time must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc.createWithResponse(null, null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// JUnit condition helper: true when the targeted service version predates 2021-04-10.
private static boolean olderThan20210410ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// Create succeeds (201) when satisfied access conditions are supplied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Satisfiable access conditions: (modified, unmodified, match, noneMatch, leaseID), one at a time.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(null, null, null, null, null),
        Arguments.of(OLD_DATE, null, null, null, null),
        Arguments.of(null, NEW_DATE, null, null, null),
        Arguments.of(null, null, RECEIVED_ETAG, null, null),
        Arguments.of(null, null, null, GARBAGE_ETAG, null),
        Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
    );
}
// Create must fail when an unsatisfied access condition is supplied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Unsatisfiable access conditions: (modified, unmodified, match, noneMatch, leaseID), one at a time.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(NEW_DATE, null, null, null, null),
        Arguments.of(null, OLD_DATE, null, null, null),
        Arguments.of(null, null, GARBAGE_ETAG, null, null),
        Arguments.of(null, null, null, RECEIVED_ETAG, null),
        Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
    );
}
// Create accepts octal permissions and umask strings.
@Test
public void createPermissionsAndUmask() {
    assertAsyncResponseStatusCode(fc.createWithResponse(
        "0777", "0057", null, null, null), 201);
}
// JUnit condition helper: true when the targeted service version predates 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// When no owner/group is supplied the service defaults both to $superuser.
// FIX: the createWithResponse Mono was never subscribed — without .block() the request is never
// sent (cold Mono), so the test only exercised the file created in setup(). The sibling test
// createIfNotExistsOptionsWithNullOwnerAndGroup blocks on the same call.
@Test
public void createOptionsWithNullOwnerAndGroup() {
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// Create accepts HTTP headers supplied through DataLakePathCreateOptions.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied through DataLakePathCreateOptions must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            // Subset check: every supplied pair must be present (service may add its own keys).
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
// Permissions 0777 masked by umask 0057 must result in rwx-w---- on the created path.
@Test
public void createOptionsWithPermissionsAndUmask() {
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease ID without a lease duration is invalid and must fail.
@Test
public void createOptionsWithLeaseIdError() {
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Absolute-expiry deletion options and null (no schedule).
// NOTE(review): uses OffsetDateTime.now() rather than testResourceNamer.now(); static context
// prevents using the namer — confirm this does not affect record/playback determinism.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
    return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a new path creates it.
@Test
public void createIfNotExistsMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
// createIfNotExists with default options returns 201 and standard headers.
@Test
public void createIfNotExistsDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Second createIfNotExists on the same path returns 409 (not created, not an exception).
@Test
public void createIfNotExistsOverwrite() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        201);
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        409);
}
// createIfNotExists leaves the path existing.
@Test
public void createIfNotExistsExists() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    assertTrue(fc.exists().block());
}
// HTTP headers supplied to createIfNotExists must be returned by getProperties.
// FIX: @CsvSource lacked nullValues = "null", so the first row passed the literal string "null"
// for each header instead of actual nulls — defeating the (contentType == null) default check
// below. Mirrors the same fix in createHeaders.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Service reports the default content type when none was set.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;

    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied to createIfNotExists must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// createIfNotExists accepts octal permissions and umask strings.
@Test
public void createIfNotExistsPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
        .setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// With explicit null owner/group the service defaults both to $superuser.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// createIfNotExists accepts HTTP headers supplied through DataLakePathCreateOptions.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata supplied through options must be present on the created path.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            // Subset check: every supplied pair must be present (service may add its own keys).
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
// Permissions 0777 masked by umask 0057 must result in rwx-w---- on the created path.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    // A proposed lease id plus a 15-second duration acquires a fixed-duration lease at creation.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    // Create succeeds (201) for each supported schedule-deletion option variant.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    // A relative expiry of 6 days should surface as expiresOn = creationTime + 6 days
    // (compared with limited precision to tolerate clock granularity).
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void deleteMin() {
    // Minimal delete with no request conditions returns HTTP 200.
    assertAsyncResponseStatusCode(fc.deleteWithResponse(
        null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
    // After deletion, a properties request must fail with 404 BlobNotFound.
    fc.deleteWithResponse(null, null, null).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Delete succeeds when every supplied access condition matches the path's current state.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.deleteWithResponse(conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Delete is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.deleteWithResponse(conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
    // deleteIfExists on an existing path reports true (something was deleted).
    StepVerifier.create(fc.deleteIfExists())
        .expectNext(true)
        .verifyComplete();
}
@Test
public void deleteIfExistsMin() {
    // Minimal deleteIfExists on an existing path returns HTTP 200.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    // After a successful deleteIfExists, the path is gone and getProperties fails.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // First call deletes the file (200); the second finds nothing and reports 404
    // without throwing — the "IfExists" contract.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // deleteIfExists succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions deleteOptions = new DataLakePathDeleteOptions()
        .setIsRecursive(false)
        .setRequestConditions(conditions);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(deleteOptions, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // deleteIfExists is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions deleteOptions = new DataLakePathDeleteOptions().setRequestConditions(conditions);
    StepVerifier.create(fc.deleteIfExistsWithResponse(deleteOptions, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // Minimal setPermissions returns updated ETag and last-modified metadata.
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
    // setPermissions with no request conditions returns HTTP 200.
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setPermissions succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setPermissions is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
    // setPermissions on a path that was never created fails with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // Minimal set-ACL returns updated ETag and last-modified metadata.
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setACLWithResponse() {
    // set-ACL with no request conditions returns HTTP 200.
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // set-ACL succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(
        fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // set-ACL is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
    // set-ACL on a path that was never created fails with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Gate for tests that require the 2020-02-10 service version (recursive ACL APIs).
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    // Recursive set-ACL on a single file changes exactly one file, no directories,
    // and reports zero failures.
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    // Recursive update-ACL on a single file changes exactly one file, no directories,
    // and reports zero failures.
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    // Recursive remove-ACL strips the listed entries (mask, default user/group, and
    // a specific OID in both direct and default scopes) from the single file.
    List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
        "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
    StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@Test
public void getAccessControlMin() {
    // A default getAccessControl populates ACL, permissions, owner, and group.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertNotNull(r.getAccessControlList());
            assertNotNull(r.getPermissions());
            assertNotNull(r.getOwner());
            assertNotNull(r.getGroup());
        })
        .verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
    // getAccessControl without UPN resolution returns HTTP 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
    // getAccessControl with UPN resolution enabled (first arg true) returns HTTP 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // getAccessControl succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(false, conditions, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    // getAccessControl is rejected when an access condition fails to match.
    // The garbage-lease-id row is deliberately skipped for this operation.
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(currentEtag);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, conditions, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesDefault() {
    // Exhaustively checks the default property surface of a freshly created file:
    // present basics (timestamps, ETag, size, content type), absent optional headers
    // (MD5, encoding, disposition, language, cache-control, copy/lease details),
    // default lease state, HOT tier, server-side encryption, and not-a-directory.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
@Test
public void getPropertiesMin() {
    // Minimal getProperties with no request conditions returns HTTP 200.
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // getProperties succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // getProperties is rejected when any supplied access condition fails to match.
    // Unlike the other *ACFail tests, this one attaches the acquired lease id.
    String acquiredLease = setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(acquiredLease)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    // getProperties on a never-created path fails and the message names BlobNotFound.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Passing null headers clears/keeps defaults and still succeeds with 200
    // and the standard response headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    // Rewrites the headers, keeping the existing values except for content type,
    // then verifies only the content type changed.
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // Sets every HTTP header field and verifies they all round-trip.
    // BUG FIX: append(...) and flush(...) returned Monos that were never subscribed,
    // so the file content was never actually uploaded; added the missing block()
    // calls, matching the pattern used by every other test in this file.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Supplies one all-null header row and one fully-populated row whose MD5 is
// computed over the default test payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] defaultPayloadMd5
        = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", defaultPayloadMd5, "type"));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setHttpHeaders succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setHttpHeaders is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
    // setHttpHeaders on a never-created path fails with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single metadata pair round-trips through setMetadata/getProperties.
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    // Sets user metadata (possibly an empty map) and verifies it round-trips exactly.
    Map<String, String> newMetadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        newMetadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        newMetadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(newMetadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(newMetadata, props.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setMetadata succeeds when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setMetadata is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
    // setMetadata on a never-created path fails with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readAllNull() {
    // Uploads the default payload, reads it back with no options, and checks the
    // full header surface of a plain read: required headers present, optional
    // content/copy/lease headers absent, default lease state, and the body matching
    // the uploaded bytes exactly.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false)
        .flatMap(r -> {
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
            return FluxUtil.collectBytesInByteBufferStream(r.getValue());
        }))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@Test
public void readEmptyFile() {
    // Reading a freshly created, never-written file yields a zero-length buffer.
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
@Test
public void readWithRetryRange() {
    // The mock policy asserts the retry request carries "bytes=2-6" and then keeps
    // failing, so the read ultimately surfaces an IOException even with 3 retries.
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}
@Test
public void readMin() {
    // Minimal read returns exactly the bytes that were uploaded.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    // Reads back only the requested byte range (open-ended when count is null).
    // Removed an unused ByteArrayOutputStream local that was never written to.
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Ranges over the default payload: whole text, a prefix, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Read succeeds (200) when every supplied access condition matches.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(setupPathLeaseCondition(fc, leaseID));
    conditions.setIfMatch(setupPathMatchCondition(fc, match));
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, conditions, false))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Read is rejected when any supplied access condition fails to match.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfNoneMatch(currentEtag)
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, conditions, false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readMd5() throws NoSuchAlgorithmException {
    // Requesting MD5 on a ranged read returns the Content-MD5 header for exactly
    // that range (first three bytes of the default payload).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void readRetryDefault() {
    // Reads through a policy that injects 5 transient failures; the default retry
    // behavior must still deliver the full payload.
    // Removed an unused ByteArrayOutputStream local that was never written to.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    // readToFile must refuse to overwrite an existing destination by default,
    // failing with UncheckedIOException wrapping FileAlreadyExistsException.
    // BUG FIX: append(...) and flush(...) were never subscribed (missing block()),
    // so the source file had no content when the download was attempted; added the
    // block() calls to match every other test in this file.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
public void downloadFileExistsSucceeds() throws IOException {
    // With overwrite=true, readToFile replaces an existing destination and the
    // downloaded content matches the uploaded payload.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
@Test
public void downloadFileDoesNotExist() throws IOException {
    // readToFile creates the destination when it does not exist and the downloaded
    // content matches the uploaded payload.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    // Explicit CREATE/READ/WRITE open options allow readToFileWithResponse to write
    // into a pre-existing destination file.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void downloadFileExistOpenOptions() throws IOException {
    // Adding TRUNCATE_EXISTING lets the download replace an existing destination's
    // content entirely.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Payload sizes: tiny, block-aligned large, deliberately unaligned, and very large.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
// NOTE(review): @EnabledIf condition string appears truncated in this view — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// Same round-trip as downloadFile but through a freshly built async service client and
// a newly created file system, mapping the response to PathProperties before asserting.
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
// Downloads a sub-range of the uploaded file and compares only that window of bytes.
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges covering: full file, offset start, tiny interior window, all-but-last byte,
// and a count larger than the file.
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range whose offset lies past the end of the blob must fail with a service error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A FileRange with no count downloads from the offset to the end of the file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Download succeeds when every access condition (modified-since, etag match, lease) is met.
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
// setupPath*Condition resolves sentinel values (e.g. received etag/lease) to real ones.
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Download fails with ConditionNotMet or a lease mismatch when a condition is violated.
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// NOTE(review): @EnabledIf condition string appears truncated in this view — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Verifies etag-locked downloads: a pipeline policy overwrites the blob after the first
// response, so subsequent ranged reads must fail with 412 and the partial file is removed.
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
// Second client used by the injected policy to overwrite the blob mid-download.
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the first response (initial GET that pins the etag), mutate the blob so the
// etag of later ranged GETs no longer matches.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple ranged requests.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give the client time to clean up, then confirm the partial download was deleted.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
// NOTE(review): @EnabledIf condition string appears truncated in this view — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Verifies the deprecated ProgressReceiver reports monotonically increasing progress
// that reaches exactly the file size and never exceeds it.
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback for later inspection (deprecated receiver API).
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// NOTE(review): @EnabledIf condition string appears truncated in this view — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Same as downloadFileProgressReceiver but for the replacement ProgressListener API.
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback for later inspection (current listener API).
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename: expect 201 Created for the destination path.
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
// After a rename, the destination resolves (200) and the source no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
/**
 * Renames the file into a different, newly created file system, verifies the destination
 * resolves (200 on getProperties) and that the source path no longer exists.
 */
@Test
public void renameFilesystemWithResponse() {
    // Fail fast with a clear message if file system creation produced no client.
    DataLakeFileSystemAsyncClient newFileSystem = Objects.requireNonNull(
        primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block(),
        "Expected destination file system to be created.");
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        // assertEquals takes the expected value first; the original had the arguments reversed.
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();
    // The source path must no longer resolve after the rename.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a path that was never created must fail with a service error.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Source/destination names containing URL-encoded characters round-trip correctly.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
// Rename succeeds when access conditions on the SOURCE are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when access conditions on the SOURCE are violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds when access conditions on the DESTINATION are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename fails when access conditions on the DESTINATION are violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
/**
 * Renames a file using a client authenticated with a file-system SAS that carries all
 * permissions a rename needs (read, move, write, create, add, delete), then verifies the
 * destination is reachable.
 */
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // Fail fast with a clear message if the rename produced no destination client.
    DataLakeFileAsyncClient destClient = Objects.requireNonNull(
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block(),
        "Expected rename to return a destination client.");
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // assertEquals takes the expected value first; the original had the arguments reversed.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
/**
 * Same as renameSasToken but the SAS is passed with a leading '?', which the client must
 * strip/handle transparently.
 */
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // Fail fast with a clear message if the rename produced no destination client.
    DataLakeFileAsyncClient destClient = Objects.requireNonNull(
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block(),
        "Expected rename to return a destination client.");
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // assertEquals takes the expected value first; the original had the arguments reversed.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Minimal append: must not throw.
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// Append returns 202 with request-id, version, date, and server-encryption headers.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a correct transactional MD5 of the payload succeeds.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null data or a length mismatch surfaces the expected exception type.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending an empty body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// Appending a null body fails client-side with NPE.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// Append succeeds with the correct lease id.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a mismatched lease id fails with 412.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Gate for lease-action tests that require service version 2020-08-04 or later.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE on append leaves the path leased with a fixed duration.
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.AUTO_RENEW keeps an existing lease active through the append.
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE (with flush) leaves the path unlocked/available afterwards.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE_RELEASE acquires for the operation and releases on completion.
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created fails with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// An injected transient failure is retried and the append still round-trips correctly.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// setFlush(true) on append commits the data without a separate flush call.
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
// Data is readable without an explicit flush since the append flushed.
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// BinaryData overload: minimal append must not throw.
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData overload: append returns 202 and the standard headers.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// BinaryData overload with setFlush(true) also returns 202 and the standard headers.
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Minimal flush after an append must not throw.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true succeeds.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Flush with retainUncommittedData=true succeeds.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing at a position that does not match the appended length fails.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers set on flush are reflected by getProperties; a null content type
// defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
/**
 * Flush must fail when any access condition (modified-since, etag, lease) is violated.
 */
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Use getDefaultDataSizeLong() for consistency with the sibling flush tests
    // (the int overload widened to the same value, so behavior is unchanged).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created fails with a service error.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// A second flush at the same position without overwrite=true is rejected.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Path names (including URL-encoded and non-ASCII forms) are decoded into getFilePath.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Bearer-token credentials require HTTPS; an http endpoint must be rejected at build time.
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
// NOTE(review): @EnabledIf condition string appears truncated in this view — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
// Uploads files of several sizes (optionally with an explicit block size), downloads
// them back via readToFile, and compares the bytes.
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Sizes up to 101 MB; the last case pins an explicit 4 MB block size.
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
// Metadata passed to uploadFromFile is persisted and the content round-trips.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Without an overwrite flag, uploading over an existing file must fail.
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
// With overwrite=true, uploading over an existing file succeeds.
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Last reported cumulative byte count; each callback overwrites the previous value.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Non-deprecated counterpart of FileUploadReporter using the ProgressListener API.
private static final class FileUploadListener implements ProgressListener {
// Last reported cumulative byte count; each callback overwrites the previous value.
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string appears truncated in this copy — confirm
// the original method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// maxSingleUploadSize is set just below blockSize to force the chunked upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
// The final cumulative progress report must equal the full file size.
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Parameter sets for the progress-reporting uploadFromFile tests:
// total size, block size, and buffer (concurrency) count.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    ).stream();
}
// Same as uploadFromFileReporter but exercising the ProgressListener API.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// maxSingleUploadSize is set just below blockSize to force the chunked upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Uploads with explicit single-upload/block-size thresholds and verifies the resulting
// remote file size matches the source.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Parameter sets: data size, single-upload threshold, optional block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Arrays.asList(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    ).stream();
}
// Same as uploadFromFileOptions but through the WithResponse overload, additionally
// asserting the HTTP status and response value (ETag / last-modified).
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Uploading an empty buffer to a non-existent path (no overwrite) is expected to fail.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
// Verifies that empty buffers interleaved in the upload stream are skipped and do not
// corrupt the concatenated result.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Rows: three buffers to upload in sequence plus the expected downloaded bytes.
// The empty buffer is placed at the end, middle, and start to cover each position.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    return Arrays.asList(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8))
    ).stream();
}
// End-to-end buffered upload across several size/block/concurrency combinations, with a
// round-trip content check for payloads under 100 MB.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Skip the download comparison for the largest payloads to keep runtime reasonable.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// Parameter sets: data size, buffer (block) size, number of concurrent buffers.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Arrays.asList(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    ).stream();
}
// Asserts that `result` is exactly the concatenation of `buffers`, walking a sliding
// position/limit window over `result`. Note: mutates the positions of the inputs.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
// Limit the window to this buffer's length before comparing.
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
result.position(result.position() + buffer.remaining());
}
// No trailing bytes may remain once all buffers are consumed.
assertEquals(0, result.remaining());
}
// Progress receiver that counts callbacks and asserts every report is block-aligned.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Non-deprecated counterpart of Reporter using the ProgressListener API.
private static final class Listener implements ProgressListener {
private final long blockSize;
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At least one block-aligned progress report per block is expected.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Parameter sets for the buffered-upload progress tests: size, block size, buffer count.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    ).stream();
}
// Same as bufferedUploadWithReporter but exercising the ProgressListener API.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a source flux whose buffers are sized differently from the transfer block size,
// then verifies the downloaded bytes equal the concatenation of the source buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Parameter sets: list of per-buffer sizes (MB), block size (MB), buffer count.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    ).stream();
}
// Verifies correct routing between single-shot and chunked upload paths for payloads
// straddling the 4 MB single-upload threshold.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing but feeds a hot (publish/autoConnect) flux, which
// cannot be re-subscribed, to verify the upload path does not rely on replaying the source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Buffer-size lists chosen to land below, above, and exactly on the 4 MB threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB)
    ).stream();
}
// Hot-flux pathing test with injected transient HTTP failures; the retried upload must
// still produce the exact concatenated payload.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Read back through a clean client (no failure injection).
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Buffer-size lists for the transient-failure pathing test (below/above/on threshold).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB)
    ).stream();
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
// Read back through the clean client and verify the retried upload was lossless.
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data flux must be rejected with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Uploads with explicit HTTP headers (optionally including a computed Content-MD5) and
// verifies they round-trip through getProperties.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName())
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
// Service defaults the content type to application/octet-stream when unset.
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Parameter sets: size, cache-control, disposition, encoding, language, MD5 flag, type.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    ).stream();
}
// Uploads with 0, 1, or 2 metadata pairs and verifies they round-trip via getProperties.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies how many append calls the upload path issues for a given size/threshold
// combination by intercepting appendWithResponse on a spying subclass.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Uploads with POSIX permissions/umask options and verifies the upload succeeds with the
// expected file size.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Upload must succeed when all supplied access conditions (lease, ETag match, dates) hold.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Upload must fail with 412 (precondition failed) when any access condition is violated.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Uses a garbage lease ID so the service rejects the upload; exercises buffer-pool
// cleanup when the chunked path fails mid-flight.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// A second buffered upload without the overwrite flag must fail once the file exists.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    // Verifies that uploadFromFile with overwrite=true succeeds on an existing path.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // FIX: the Mono must be subscribed via block(); without it the upload never executed
    // and assertDoesNotThrow was vacuous (compare uploadFromFileOverwrite above).
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
        .verifyComplete();
}
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    // Round-trips a file through upload/readToFile using a non-replayable file-channel flux.
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    // FIX: close the channel deterministically — the original leaked it. The channel must
    // stay open until block() completes, so the upload happens inside the try block.
    try (AsynchronousFileChannel channel = AsynchronousFileChannel.open(file.toPath())) {
        Flux<ByteBuffer> stream = FluxUtil.readFile(channel, 0, file.length());
        fc.upload(stream, null, true).block();
    }
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
// Uploading an InputStream without a declared length must still succeed and round-trip.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// A declared length that disagrees with the stream's actual length must be rejected.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Bad lengths: zero, negative, one short of, and one past the real data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
// An upload through a pipeline that injects transient failures must still succeed via
// retries and round-trip the payload intact.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploads via the BinaryData overload and verifies the content round-trips.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// BinaryData upload with overwrite=true on an existing file must succeed and round-trip.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploads with an encryption context (service version 2021-04-10+) and verifies it is
// returned by getProperties.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
/**
 * Recreates the test file and fills it with {@code numCopies} copies of a two-row CSV
 * payload, optionally preceded by a header row per the serialization's headersPresent flag.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    // FIX: encode with an explicit charset; the no-arg getBytes() depends on the platform
    // default encoding (pre-Java 18) and could vary between test machines.
    byte[] headers = header.getBytes(StandardCharsets.UTF_8);
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes(StandardCharsets.UTF_8);
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Lay the CSV body copies back-to-back after the (optional) header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
/**
 * Recreates the test file with a small JSON object containing {@code numCopies}
 * name/owner entries.
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    b.append('}');
    // FIX: flush the encoded byte count. The original flushed b.length() (a char count)
    // while BinaryData.fromString appends UTF-8 bytes; identical for this ASCII payload
    // but wrong in general. Encoding once keeps the two in lockstep.
    byte[] payload = b.toString().getBytes(StandardCharsets.UTF_8);
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(payload), 0).block();
    fc.flush(payload.length, true).block();
}
// Runs a pass-through quick query (SELECT *) over CSV payloads of varying row counts and
// verifies the queried bytes equal a plain read.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
// NOTE(review): piece.array() ignores the buffer's position/limit/arrayOffset —
// assumes each emitted ByteBuffer exactly wraps its backing array; confirm.
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Quick query with matching input/output delimited serializations across many separator
// characters, with and without header rows.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn)
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// When the input has headers but the output strips them, the query result is the
// read result minus the fixed 16-byte header row written by uploadCsv.
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
int retry = 0;
while (retry < 5) {
try {
runnable.run();
break;
} catch (Exception ex) {
retry++;
sleepIfRunningAgainstService(5000);
}
}
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
    // Serialization implementation unknown to the client; used to verify that query option
    // validation rejects unsupported input/output serialization types.
    private static final class RandomOtherSerialization implements FileQuerySerialization {
    }
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
    @Test
    // Verifies that a per-call pipeline policy overrides the service version header on every
    // request made by a client built with that policy.
    public void perCallPolicy() {
        DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
            .addPolicy(getPerCallVersionPolicy())
            .buildFileAsyncClient();
        // Both a blob-endpoint call (getProperties) and a dfs-endpoint call (getAccessControl)
        // must carry the version pinned by the policy.
        assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
            .getValue(X_MS_VERSION));
        assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
            .getValue(X_MS_VERSION));
    }
} |
I might recommend using `FluxUtil.collectBytesInByteBufferStream` to convert the `read` to a `byte[]` and then that can be compared to `DATA.getDefaultBytes()` ```suggestion StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read())) .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes)) .verifyComplete(); ``` | public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(failureFileAsyncClient.read())
.assertNext(r -> {
try {
downloadData.write(r.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
} | StepVerifier.create(failureFileAsyncClient.read()) | public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions + umask supplied at create time must yield the expected effective POSIX permissions.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id together with a fixed lease duration is accepted at create time (201).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id WITHOUT a lease duration is rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A lease acquired at create time must be reported as LOCKED / LEASED / FIXED on the properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Create succeeds both with an absolute scheduled-deletion time and with no schedule at all.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an absolute expiry (now + 1 day) and a null schedule for the parameterized test above.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// A relative time-to-expire must resolve to creationTime + duration on the service side.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a fresh path actually creates the file.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options return 201 with the standard response headers populated.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext( r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// A second createIfNotExists on an existing path returns 409 instead of overwriting.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// createIfNotExists followed by exists() confirms the file is present.
// NOTE(review): asserts via a blocking call rather than StepVerifier like the sibling tests.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// HTTP headers supplied through createIfNotExists options must be persisted on the path.
// BUGFIX: the @CsvSource was missing nullValues = "null" (unlike the sibling tests at
// createIfNotExistsMetadata / createIfNotExistsOptionsWithPathHttpHeaders), so the first row
// passed the literal string "null" for every header and the contentType-null default branch
// below was never exercised.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service reports a default content type when none was set at create time.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied through createIfNotExists options must round-trip via getProperties.
@ParameterizedTest
@CsvSource(value={"null,null,null,null","foo,bar,fizz,buzz"},nullValues="null")
public void createIfNotExistsMetadata(String key1,String value1,String key2,String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata,r.getMetadata()))
.verifyComplete();
}
// Permissions + umask are accepted on the if-not-exists create path (201).
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"),Context.NONE), 201);
}
// The encryption context set at create time must surface on getProperties, on the read
// response headers, and on listPaths entries.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // BUGFIX: this chain previously lacked a terminal verifyComplete(), so the StepVerifier was
    // never executed and the read-header assertion silently did not run.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // First entry is the directory created above; the asserted entry is the file.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// An explicit ACL supplied at create time must be returned by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied at create time must be reflected on the path.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Null owner/group falls back to the service default "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// HTTP headers passed via options are accepted on the if-not-exists create path (201).
@ParameterizedTest
@CsvSource(value={"null,null,null,null,null,application/octet-stream","control,disposition,encoding,language,null,type"},
nullValues="null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,String contentEncoding,
String contentLanguage, byte[] contentMD5,String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata passed via options must be present on the created path.
@ParameterizedTest
@CsvSource(value={"null,null,null,null","foo,bar,fizz,buzz"},nullValues="null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for(String k : metadata.keySet()){
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions + umask via options must yield the expected effective POSIX permissions.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id with a fixed duration is accepted on the if-not-exists path (201).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId=CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id WITHOUT a duration is rejected by the service.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc=dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A lease acquired at create time is reported as LOCKED / LEASED / FIXED.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Create succeeds with an absolute scheduled-deletion time or with no schedule.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative time-to-expire must resolve to creationTime + duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Basic delete returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After a delete, getProperties fails with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null,null,null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when every access condition (lease, etag, dates) matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match,String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
;
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file reports true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExists returns 200 for an existing file.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties fails.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists reports 404 rather than throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns an updated etag and last-modified time.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse returns 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a non-existent file fails.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns an updated etag and last-modified time.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r ->{
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse returns 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a non-existent file fails.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard used by @DisabledIf: true when the target service predates 2020-02-10.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive set-ACL on a single file changes exactly one file and no directories.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive update-ACL on a single file changes exactly one file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive remove-ACL on a single file removes the listed entries from exactly one file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl populates ACL, permissions, owner and group.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// getAccessControlWithResponse returns 200 without UPN translation.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControlWithResponse returns 200 with UPN translation requested.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails when an access condition does not match.
// The garbage-lease-id row is skipped: that condition is not enforced by this operation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustively checks the default property/header set of a freshly created file:
// standard headers present, copy/lease fields absent, HOT tier, not a directory.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// getPropertiesWithResponse returns 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// getProperties on a non-existent file surfaces BlobNotFound.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// Passing null headers is accepted and returns 200 with basic headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Changing only the content type leaves the remaining headers intact.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Headers set via setHttpHeaders must round-trip through getProperties, including a
// content-MD5 computed over the uploaded default data.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUGFIX: append/flush return cold Monos; without subscribing (block()) nothing was ever
    // uploaded, so the MD5 checked below could not correspond to real file content.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Supplies a fully-null header row and a fully-populated row (MD5 of the default data).
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
// setHttpHeaders succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a non-existent file fails.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// Metadata set via setMetadata must round-trip through getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Empty and populated metadata maps both return 200 and round-trip.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setMetadata on a non-existent file fails.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// A plain read must return the uploaded bytes plus the default header set, with all
// copy/lease/metadata headers absent.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> {
            // BUGFIX: the body was previously checked inside an async subscribe() callback, so a
            // failing assertion was delivered to onError and silently ignored; it also compared
            // against piece.array(), which exposes the whole backing array rather than the
            // readable slice. Drain the stream synchronously and assert on the test thread.
            java.io.ByteArrayOutputStream collected = new java.io.ByteArrayOutputStream();
            for (java.nio.ByteBuffer piece : r.getValue().toIterable()) {
                byte[] chunk = new byte[piece.remaining()];
                piece.get(chunk);
                collected.write(chunk, 0, chunk.length);
            }
            TestUtils.assertArraysEqual(DATA.getDefaultBytes(), collected.toByteArray());
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
        })
        .verifyComplete();
}
// Reading a zero-length file yields an empty buffer rather than an error.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Uses MockRetryRangeResponsePolicy to confirm retried downloads request the correct
// sub-range ("bytes=2-6"); the injected policy makes the body stream fail with IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
// The response itself succeeds; consuming the body is what triggers the injected failure.
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
/*StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.verifyErrorSatisfies(r -> {
RuntimeException e = assertInstanceOf(RuntimeException.class, r);
assertInstanceOf(IOException.class, e.getCause());
});*/
}
// Round-trips the default payload through append/flush/read.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Ranged reads return exactly the requested slice; a null count means "to end of file".
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
// NOTE(review): accumulation/assertion happens inside an async subscribe callback;
// a failure there may not fail the test thread — confirm this is intentional.
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// (offset, count, expected substring) triples: full read, head slice, interior slice.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// Read succeeds when every access condition (lease, ETag, modified-since) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// Read fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting contentMD5 on a ranged read returns the base64 MD5 of just that range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile to an already-existing path (no overwrite flag) must fail with
// FileAlreadyExistsException wrapped in an UncheckedIOException.
// Fixes: removed a duplicated @Test annotation (@Test is not repeatable — compile error),
// and added the missing block() calls — the append/flush Monos were never subscribed, so
// the upload silently never happened (every sibling test blocks on these calls).
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// With overwrite=true, readToFile replaces an existing local file with the blob content.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile creates the local file when it does not exist yet.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Explicit OpenOptions (CREATE/READ/WRITE) work when the destination file already exists.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// TRUNCATE_EXISTING in the OpenOptions overwrites an existing destination file.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file,outFile,0,fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes for download tests: tiny, multi-block, a deliberately unaligned size, and 50 MB.
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged readToFile writes exactly the requested slice to disk.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges: full file, offset tail, interior slice, truncated tail, and a count past EOF.
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range whose offset is past the end of the file surfaces a service error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with a null count downloads from the offset to the end of the file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFile succeeds when every access condition (lease, ETag, modified-since) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFile fails with ConditionNotMet/lease mismatch when a condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
// Either error code is acceptable depending on which condition the service checks first.
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double that records every cumulative progress callback for later inspection.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Cumulative byte counts, in callback order.
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double that records every cumulative progress callback for later inspection.
private static final class MockProgressListener implements ProgressListener {
// Cumulative progress values, in callback order.
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename within the same file system returns 201 Created.
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null,null, null), 201);
}
// After a rename the destination is readable and the source no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renames the file into a different (freshly created) file system, then verifies the
// destination exists and the source does not.
// Fixes: assert the blocking createFileSystem call actually returned a client (previously a
// null result would surface as an opaque NPE), and put the expected value first in
// assertEquals so failure messages are correct.
@Test
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem
        = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    assertNotNull(newFileSystem);
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
            null, null, null))
        .assertNext(r -> {
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
// Renaming a path that was never created surfaces a service error.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename handles percent-encoded characters in source and/or destination path names.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
})
.verifyComplete();
}
// Rename succeeds when the SOURCE access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when a SOURCE access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds when the DESTINATION access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename fails when a DESTINATION access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename works when the client authenticates with a file-system SAS carrying move/write
// permissions; the destination must then be readable.
// Fixes: assert the blocking rename actually returned a destination client (previously a
// null result would surface as an opaque NPE), and put the expected value first in
// assertEquals so failure messages are correct.
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client
        = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient
        = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    assertNotNull(destClient);
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?' which the client must
// tolerate.
// Fixes: assert the blocking rename actually returned a destination client (previously a
// null result would surface as an opaque NPE), and put the expected value first in
// assertEquals so failure messages are correct.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client
        = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient
        = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    assertNotNull(destClient);
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Minimal append does not throw.
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// Append returns 202 with the standard request/version/date/encryption headers.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a client-computed MD5 of the payload is accepted.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null body or a declared length that disagrees with the actual data must error.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// (flux, declaredSize, expectedException): null flux, over-declared size, under-declared size.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// Appending a null body is rejected by the service.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(DataLakeStorageException.class);
}
// Append succeeds when the correct lease id for a leased file is supplied.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with the wrong lease id fails with 412 Precondition Failed.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard: true when the targeted service version predates 2020-08-04,
// used to skip tests that depend on newer service behavior.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE on append takes out a fixed-duration lease as a side effect.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW keeps an existing lease alive across the append.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE (with flush) frees the lease once the append completes.
// NOTE(review): unlike the other LeaseAction tests this one is not guarded by
// @DisabledIf("olderThan20200804ServiceVersion") — confirm whether that is intentional.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE acquires for the operation and releases on completion.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a file that was never created should fail with a 404 DataLakeStorageException.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(404, e.getResponse().getStatusCode());
        });
}
// An append through a pipeline that injects a transient failure should be retried
// transparently; after flushing, the read-back bytes must match what was appended.
@Test
public void appendDataRetryOnTransientFailure() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.read())
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
        .verifyComplete();
}
// Append with flush=true should commit the data in a single call (no separate flush needed);
// verifies the 202 response headers and that the data is immediately readable.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    // Data was flushed by the append, so it is readable without an explicit flush call.
    StepVerifier.create(fc.read())
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
        .verifyComplete();
}
// Minimal smoke test: appending BinaryData at offset 0 should not throw.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// Appending BinaryData should return 202 with the standard service headers
// (request id, service version, date) and server-side encryption enabled.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// BinaryData append with flush=true should return 202 with the standard service headers.
// NOTE(review): unlike appendDataFlush, this variant does not read the data back — presumably
// intentional, but confirm no read-back verification was dropped.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Minimal smoke test: append then flush (overwrite=true) should not throw.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (retainUncommittedData=false) should succeed on freshly appended data.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
// Flush with retainUncommittedData=true (close=false) should succeed on freshly appended data.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
// Flushing at a position (4) that does not match the appended length should be rejected
// by the service with a DataLakeStorageException.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
        null))
        .verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time should be persisted on the path; a null content type
// defaults to "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    // The service substitutes the default content type when none is provided.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
// Flush with *matching* access conditions (lease, ETag match/none-match, modified-since)
// should succeed with 200. The setup helpers resolve sentinel values into real lease/ETag values.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
// Flush with *failing* access conditions should be rejected with a DataLakeStorageException.
// Note the inversion relative to flushAC: noneMatch is resolved to the real ETag (so If-None-Match
// fails) while match/leaseID are used raw (so they do not match).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency fix: use the long-valued size accessor like every other flush test in this file
    // (flushWithResponse takes a long position; the previous int accessor widened implicitly).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a file that was never created should fail with a DataLakeStorageException.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true))
        .verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false over already-committed data should fail, while the
// first flush with overwrite=true succeeds.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // Cannot flush into an existing committed file without overwrite permission.
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// File names containing special characters, percent-encoding, or non-ASCII text should be
// decoded/normalized to the expected path by the client.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, client.getFilePath());
}
// Building a client with a bearer-token credential over plain HTTP must be rejected:
// tokens may only be sent over HTTPS.
@Test
public void builderBearerTokenValidation() {
    String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(endpoint)
        .buildFileAsyncClient());
}
// Round-trip test: upload a random local file of the given size (with an optional block-size
// override), download it back, and compare contents byte-for-byte.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference (e.g. a live-mode gate on DataLakeTestBase) against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fac.uploadFromFile(file.getPath(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
        .verifyComplete();
    File outFile = new File(file.getPath() + "result");
    assertTrue(outFile.createNewFile());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    StepVerifier.create(fac.readToFile(outFile.getPath(), true))
        .expectNextCount(1)
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// Test cases for uploadFromFile: (file size in bytes, block-size override or null for default).
private static Stream<Arguments> uploadFromFileSupplier() {
    Arguments tinyFile = Arguments.of(10, null);
    Arguments tenKbFile = Arguments.of(10 * Constants.KB, null);
    Arguments fiftyMbFile = Arguments.of(50 * Constants.MB, null);
    Arguments largeFileWithBlockSize = Arguments.of(101 * Constants.MB, 4L * 1024 * 1024);
    return Stream.of(tinyFile, tenKbFile, fiftyMbFile, largeFileWithBlockSize);
}
// Metadata supplied to uploadFromFile should be persisted on the path, and the uploaded
// content should match the local file byte-for-byte.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(fc.read())
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r.array());
            } catch (IOException e) {
                // Files.readAllBytes is checked; rethrow unchecked so the assertion lambda compiles.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// uploadFromFile without the overwrite flag must fail against an existing file — both for a
// file with committed content (fc) and for a freshly created empty file (fac).
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Fix: register the second temp file for cleanup like every other test in this file
    // (previously it was created inline and leaked on disk after the run).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// uploadFromFile with overwrite=true must succeed against an existing file — both for a file
// with committed content (fc) and for a freshly created empty file (fac).
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Fix: register the second temp file for cleanup like every other test in this file
    // (previously it was created inline and leaked on disk after the run).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This differs from the other reporters,
 * which track the number of progress reports: upload-from-file hooks into the stream that loads
 * data from disk, and that stream uses a hard-coded read size.
 */
// Deprecated ProgressReceiver implementation kept to test the legacy progress API.
// Records the latest cumulative byte count reported by the transfer.
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Latest cumulative bytes transferred (each callback overwrites the previous value).
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        this.reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// ProgressListener counterpart of FileUploadReporter for the non-deprecated progress API.
// Records the latest cumulative byte count reported by the transfer.
private static final class FileUploadListener implements ProgressListener {
    // Latest cumulative bytes transferred (each callback overwrites the previous value).
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        this.reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// The deprecated ProgressReceiver should see the full file size reported by the end of an
// upload-from-file transfer.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadReporter.getReportedByteCount());
}
// Test cases for progress-tracking uploads: (total size, block size, concurrency).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    Arguments singleLargeBlock = Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8);
    Arguments manySmallBlocks = Arguments.of(20 * Constants.MB, (long) Constants.MB, 5);
    Arguments twoBlocks = Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2);
    Arguments tinyBlocksHighConcurrency = Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100);
    return Stream.of(singleLargeBlock, manySmallBlocks, twoBlocks, tinyBlocksHighConcurrency);
}
// The ProgressListener (non-deprecated API) should see the full file size reported by the end
// of an upload-from-file transfer.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadListener.getReportedByteCount());
}
// Upload-from-file with explicit single-upload and block-size options should produce a file
// of exactly the source size, regardless of which upload path (single vs chunked) is taken.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Test cases: (data size, max single-upload size, block size or null for default).
// Both exceed the single-upload threshold, exercising the chunked path with and without
// an explicit block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Arguments defaultBlockSize = Arguments.of(100, 50L, null);
    Arguments explicitBlockSize = Arguments.of(100, 50L, 20L);
    return Stream.of(defaultBlockSize, explicitBlockSize);
}
// uploadFromFileWithResponse should return 200 with ETag/last-modified populated, and the
// resulting file must have exactly the source size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // BUG FIX: the original built this StepVerifier but never subscribed to it (missing
    // verifyComplete()), so the file-size assertion was silently never executed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Buffered upload of a zero-length buffer without overwrite should fail with a
// DataLakeStorageException.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
        .verifyError(DataLakeStorageException.class);
}
// Buffered upload must tolerate empty buffers interleaved in the source Flux; the downloaded
// content should equal the concatenation of the non-empty buffers.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
    byte[] expectedDownload) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
        null, true))
        .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
        .verifyComplete();
}
// Test cases: three source buffers (one of which may be empty at each position) and the
// bytes expected after upload + download. The shared zero-capacity buffer is safe to reuse.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        // No empty buffer: all three segments concatenate.
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
        // Empty buffer last.
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
        // Empty buffer in the middle.
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        // Empty buffer first.
        Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8)));
}
// Buffered upload of a single large ByteBuffer split by the given buffer size / concurrency
// should round-trip correctly. Read-back comparison is skipped for the very largest cases
// (>= 100 MB) to keep runtime bounded.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
// Test cases for buffered upload: (total data size, per-buffer size, number of buffers).
// Covers even and uneven splits at various sizes and concurrency levels.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),   // uneven split, low concurrency
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),   // uneven split, higher concurrency
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2), // large payload, low concurrency
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5), // large payload, higher concurrency
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10), // many small buffers
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),  // even split
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),   // even split, mid concurrency
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));  // uneven split, mid concurrency
}
/*
 * Asserts that the concatenation of the given chunks equals the contents of the single
 * aggregate buffer. Walks the aggregate with a sliding [position, limit) window, one chunk at
 * a time, and finally checks that no trailing bytes remain. Note: mutates the position/limit
 * of every buffer involved.
 */
private static void compareListToBuffer(List<ByteBuffer> expectedChunks, ByteBuffer actual) {
    actual.position(0);
    for (ByteBuffer expectedChunk : expectedChunks) {
        expectedChunk.position(0);
        // Restrict the window over 'actual' to exactly this chunk's length before comparing.
        actual.limit(actual.position() + expectedChunk.remaining());
        TestUtils.assertByteBuffersEqual(expectedChunk, actual);
        // Slide the window forward past the bytes just compared.
        actual.position(actual.position() + expectedChunk.remaining());
    }
    assertEquals(0, actual.remaining());
}
// Deprecated ProgressReceiver that counts progress callbacks for buffered (non-file) uploads.
// The field is read directly by the tests (uploadReporter.reportingCount), so it stays package-visible.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    // Number of progress callbacks observed.
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Progress for buffered uploads is expected in whole-block increments.
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// ProgressListener counterpart of Reporter for the non-deprecated progress API.
// The field is read directly by the tests (uploadListener.reportingCount), so it stays package-visible.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    // Number of progress callbacks observed.
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // Progress for buffered uploads is expected in whole-block increments.
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// Buffered upload via the deprecated ProgressReceiver API: the reporter must be invoked at
// least once per block.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // At least one progress report per block (retries may produce extras).
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Test cases for buffered uploads with progress tracking: (total size, block size, concurrency).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Arguments singleLargeBlock = Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8);
    Arguments manyOneMbBlocks = Arguments.of(20 * Constants.MB, (long) Constants.MB, 5);
    Arguments twoBlocks = Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2);
    Arguments smallBlocksHighConcurrency = Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20);
    return Stream.of(singleLargeBlock, manyOneMbBlocks, twoBlocks, smallBlocksHighConcurrency);
}
// Buffered upload via the ProgressListener API: the listener must be invoked at least once
// per block.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // At least one progress report per block (retries may produce extras).
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Buffered upload from a multi-chunk source Flux (chunk sizes in MB) should round-trip: the
// downloaded bytes must equal the concatenation of the source chunks.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Test cases: (source chunk sizes in MB, block size in MB, concurrency). Chunks chosen so they
// align with, straddle, or exactly fill the block boundaries.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Arguments twoSmallChunks = Arguments.of(Arrays.asList(7, 7), 10L, 2);
    Arguments manyTinyChunks = Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2);
    Arguments exactBlockChunks = Arguments.of(Arrays.asList(10, 10), 10L, 2);
    Arguments largeUnevenChunks = Arguments.of(Arrays.asList(50, 51, 49), 10L, 2);
    return Stream.of(twoSmallChunks, manyTinyChunks, exactBlockChunks, largeUnevenChunks);
}
// Upload path selection (single-shot vs chunked) around the 4 MB single-upload threshold
// should round-trip correctly for a cold source Flux.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot (publish().autoConnect()) source Flux,
// which cannot be resubscribed — exercises the non-replayable source path.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists (in bytes) straddling the 4 MB single-upload threshold: well under,
// just over, exactly double, and exactly at the boundary.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<Integer> underThreshold = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoFullBlocks = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    List<Integer> exactlyAtThreshold = Collections.singletonList(4 * Constants.MB);
    return Stream.of(underThreshold, justOverThreshold, twoFullBlocks, exactlyAtThreshold);
}
// Hot-Flux buffered upload through a failure-injecting pipeline: transient failures must be
// retried without data loss even though the source cannot be resubscribed.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // Read back through a clean client (no failure injection) pointed at the same file.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists (in bytes) for the transient-failure hot-Flux test: small chunks, a chunk
// just over the 4 MB threshold, and two exactly-threshold chunks.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<Integer> smallChunks = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoFullBlocks = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    return Stream.of(smallChunks, justOverThreshold, twoFullBlocks);
}
// InputStream-based buffered upload through a failure-injecting pipeline: transient failures
// must be retried and the read-back bytes must match the source exactly.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // Robustness fix: fail with a clear assertion instead of an NPE if block() yields no value
    // (e.g. the read stream completes empty).
    assertNotNull(readData);
    byte[] readArray = readData.toByteArray();
    TestUtils.assertArraysEqual(data, readArray);
}
// Passing a null source Flux to upload must fail with a NullPointerException
// (argument validation, surfaced reactively rather than thrown eagerly).
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// HTTP headers (including an optional precomputed MD5) supplied to a buffered upload should be
// persisted on the path; a null content type defaults to "application/octet-stream".
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    // Only compute/send an MD5 when the test case asks for validation.
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// Test cases: (data size, cache-control, disposition, encoding, language, validate MD5,
// content type). Small payloads take the single-shot path; 6 MB payloads take the chunked path.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    Arguments smallDefaults = Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null);
    Arguments smallAllHeaders =
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type");
    Arguments largeDefaultsNoMd5 = Arguments.of(6 * Constants.MB, null, null, null, null, false, null);
    Arguments largeAllHeaders =
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type");
    return Stream.of(smallDefaults, smallAllHeaders, largeDefaultsNoMd5, largeAllHeaders);
}
// Metadata supplied to a buffered upload should be persisted on the path exactly
// (empty map when both key pairs are null).
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// Verifies the single-shot vs chunked decision by counting appendWithResponse calls through a
// subclass that intercepts them: the expected number of appends depends on data size vs the
// single-upload threshold and block size.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — confirm the
// full method reference against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Anonymous subclass counts every append issued by the upload machinery.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
// Buffered upload with POSIX permissions ("0777") and umask ("0057") set; asserts the
// upload succeeds (200) and the resulting file has the expected 10-byte size.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
// Round-trips a random 10-byte file through upload/readToFile using a non-markable
// (file-channel-backed) Flux source, then byte-compares input and output files.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Uploading an InputStream without an explicit length must succeed and round-trip the bytes.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// A declared length that disagrees with the actual stream length (too small/large/negative)
// must fail the upload.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid lengths: zero, negative, one byte short, one byte long.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
// Upload through a pipeline that injects a transient failure (presumably retried by the
// policy — confirm against TransientFailureInjectingHttpPipelinePolicy) still succeeds.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// BinaryData-based upload round-trips the default payload.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// BinaryData upload with overwrite=true onto an existing file round-trips the payload.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(fc.read())
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r.array()))
.verifyComplete();
}
// Encryption context supplied at upload must be echoed back by getProperties
// (service versions >= 2021-04-10 only).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload (optional header row + numCopies two-record bodies) and uploads it
// to fc via create/append/flush, each step blocked to completion.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Builds a small JSON object ({"name0": "owner0", ...}) and uploads it to fc.
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"name%d\",\n", i, i).replace("name" + i + "\",", "owner" + i + "\","));
    }
    b.append('}');
    // FIX: create/append/flush return Monos that were assembled but never subscribed, so the
    // upload never actually happened (compare uploadCsv, which blocks on each step).
    fc.create(true).block();
    fc.append(BinaryData.fromString(b.toString()), 0).block();
    fc.flush(b.length(), true).block();
}
// Minimal quick-query test: a SELECT * over a CSV blob must return exactly the raw file
// contents. Both the read and the query accumulate bytes into a ByteArrayOutputStream.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
// Raw file contents for comparison.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Quick-query with varying record/column separators and header configurations; the query
// result must equal the raw contents (minus the 16-byte header when it is consumed).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    // Raw file contents for comparison.
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    // FIX: a flatMap mapper must not return null — Reactor throws an NPE when the
                    // mapper is applied. Return an empty Mono instead, as the sibling tests do.
                    return Mono.empty();
                }).blockLast();
                // FIX: same null-mapper defect on the outer flatMap.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Arguments: (recordSeparator, columnSeparator, headersPresentIn, headersPresentOut).
// Covers a range of unusual separators plus the three header-presence combinations.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Quick-query with escape char and field quote set; result must equal the raw file contents.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    // Raw file contents for comparison.
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // FIX: a flatMap mapper must not return null — Reactor throws an NPE when the
                // mapper is applied; return an empty Mono, matching the inner flatMap.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Quick-query over a JSON blob with a given record separator; the query result (raw file
// plus a trailing newline appended to readData) must equal the file contents.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // The query output terminates the final record with the separator; append it ('\n' == 10).
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // FIX: a flatMap mapper must not return null — Reactor throws an NPE when the
                // mapper is applied; return an empty Mono, matching the inner flatMap.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Record counts to exercise, all paired with '\n' as the JSON record separator.
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(0, 10, 100, 1000).map(copies -> Arguments.of(copies, '\n'));
}
// CSV in, JSON out: a single CSV record must be returned as the expected JSON object.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // FIX: a flatMap mapper must not return null — Reactor throws an NPE when the
                // mapper is applied; return an empty Mono, matching the inner flatMap.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        // Only the prefix is compared; the output may carry a trailing record separator.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
// JSON in, CSV out: two JSON properties must come back as a single CSV record.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        fc.queryWithResponse(optionsOs)
            .flatMap(piece -> {
                piece.getValue().flatMap(r -> {
                    try {
                        queryData.write(r.array());
                    } catch (IOException ex) {
                        throw new UncheckedIOException(ex);
                    }
                    return Mono.empty();
                }).blockLast();
                // FIX: a flatMap mapper must not return null — Reactor throws an NPE when the
                // mapper is applied; return an empty Mono, matching the inner flatMap.
                return Mono.empty();
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
// CSV in, Arrow out with a single DECIMAL column schema; only asserts the query call
// does not throw (Arrow output content is not validated here).
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// The file is uploaded with '.' separators but queried with ','; the resulting
// InvalidColumnOrdinal errors are non-fatal and must reach the error consumer.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// Querying CSV data with a JSON input serialization is a fatal error: the response
// arrives, but consuming the body must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// The progress consumer must eventually report the full file size as bytes scanned.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
// An unknown FileQuerySerialization implementation (neither delimited, JSON, Arrow, nor
// Parquet) must be rejected client-side with IllegalArgumentException, whether used as
// input or output serialization.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Arrow is output-only: using it as input serialization must fail client-side.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// JUnit condition helper: true when the targeted service version predates 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Parquet is input-only: using it as output serialization must fail client-side.
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Querying a file that was never created must surface DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
// Query with satisfiable access conditions (lease/ETag resolved by setup* helpers)
// must succeed.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
// Runs the scenario once in playback/record mode; in live mode retries up to five
// times, pausing 5s after each failed attempt, to absorb transient service failures.
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    for (int attempt = 0; attempt < 5; attempt++) {
        try {
            runnable.run();
            return;
        } catch (Exception ex) {
            sleepIfRunningAgainstService(5000);
        }
    }
}
// Query with intentionally unsatisfiable access conditions must fail with
// DataLakeStorageException.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
// Scheduling deletion should set (or leave unset) the file's expiry per the options.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Arguments: (options, whether an expiry is expected afterwards).
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// JUnit condition helper: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// Absolute-time expiry must round-trip (truncated to whole seconds by the service).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Scheduling deletion on a file that was never created must fail.
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Records every bytes-scanned value reported by a quick-query operation.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Counts non-fatal query errors, asserting each has the expected error name.
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
// Deliberately empty serialization impl used to exercise client-side validation.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// upload without overwrite on the pre-created file must fail.
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
// upload with overwrite=true must replace the contents with the default payload.
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] readArray = readData.toByteArray();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: (dataSize, singleUploadSize, blockSize, expected append count). The default
// single-shot cutoff is 100 MB and the default block size 4 MB, per the cases below.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
// uploadWithResponse must return path info carrying a non-null ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call policy pinning x-ms-version to 2019-02-02 must be reflected in the
// response headers of both getProperties and getAccessControl.
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
}

class FileAsyncApiTests extends DataLakeTestBase {
// File client under test; re-created per test in setup().
private DataLakeFileAsyncClient fc;
// Local files produced by tests, deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwxr-xr-- permission set shared by the setPermissions tests.
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
// null group/owner: the service applies its defaults (tests below observe "$superuser").
private static final String GROUP = null;
private static final String OWNER = null;
// Canonical ACL used across the set/get ACL tests.
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
// Each test starts with a freshly created file so state never leaks between tests.
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
    // Minimal create(): a fresh path should be created and emit a non-null PathInfo.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        .assertNext(pathInfo -> assertNotEquals(null, pathInfo))
        .verifyComplete();
}
@Test
public void createDefaults() {
    // Creating with every optional argument left null should succeed with 201
    // and return the standard creation headers.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(null, null, null, null, null))
        .assertNext(response -> {
            assertEquals(201, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// An If-Match condition that cannot possibly match must make the create fail.
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
    // Creating an already-existing file with overwrite disabled must fail.
    String pathName = generatePathName();
    fc = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
    // exists() reports true for a file that was just created.
    String pathName = generatePathName();
    fc = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
@Test
public void doesNotExist() {
    // A client pointing at a never-created path reports exists() == false.
    String pathName = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(pathName);
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
// Fix: without nullValues = "null" the first CSV row passed the literal string "null"
// as every header, so the defaults branch (null headers -> "application/octet-stream")
// was never actually exercised. Siblings createMetadata etc. already declare it.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // When no content type is supplied the service stores the default one.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
// Verifies metadata supplied at create time is returned by getProperties.
// nullValues = "null" turns the first row into the "no metadata" case.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
/** Gate for tests that need the 2021-04-10 service version (e.g. encryption context). */
private static boolean olderThan20210410ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Access-condition rows that should all SUCCEED:
// columns are (ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseId).
// RECEIVED_* sentinels are resolved to live values by setupPath*Condition in the tests.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Access-condition rows that should all FAIL:
// columns are (ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseId).
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
/** Gate for tests that need the 2020-12-06 service version (ACL/owner/lease create options). */
private static boolean olderThan20201206ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // Fix: the Mono returned by createWithResponse was never subscribed, so the create
    // request never executed (cold publisher). Block so it completes before asserting.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            // With no owner/group supplied, the service assigns $superuser to both.
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
    // Consistency: assert reactively via StepVerifier like the sibling
    // createIfNotExists tests, instead of blocking and asserting the raw boolean.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
// Fix: without nullValues = "null" the first CSV row passed the literal string "null"
// as every header, so the null-headers/default-content-type branch was never exercised
// (same fix as createHeaders; spacing normalized to match sibling sources).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // When no content type is supplied the service stores the default one.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
// First call deletes the file created by setup() (200); the second call finds
// nothing to delete and reports 404 without throwing.
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
/** Gate for tests that need the 2020-02-10 service version (recursive ACL operations). */
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl returns a fully-populated result for a freshly created file.
@Test
public void getAccessControlMin() {
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertNotNull(r.getAccessControlList());
            assertNotNull(r.getPermissions());
            assertNotNull(r.getOwner());
            assertNotNull(r.getGroup());
        })
        .verifyComplete();
}
// Response overload without UPN translation returns 200.
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}
// userPrincipalNameReturned=true (UPN translation) also returns 200.
@Test
public void getAccessControlReturnUpn() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}
// Positive access-condition matrix: all satisfied conditions allow the read (200).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, drc, null), 200);
}
// Negative access-condition matrix. The garbage-lease case is skipped because a bogus
// lease id is not rejected by this read path.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
        .verifyError(DataLakeStorageException.class);
}
// Exhaustive check of default property values on a new file: identity/time/etag fields are
// set, all optional content headers and copy/lease/archive fields are unset.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
// Minimal getProperties call succeeds with 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// Positive access-condition matrix for getProperties.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// Negative access-condition matrix for getProperties.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// getProperties on a nonexistent path fails with BlobNotFound.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
// setHttpHeaders(null) clears headers and still returns 200 with valid basic headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Changing only the content type leaves the remaining headers untouched.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Round-trips a full set of HTTP headers (including null/cleared values) through
// setHttpHeaders and verifies them via getProperties.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUG FIX: append/flush return cold Monos; without block() they were never subscribed,
    // so the file content was never written before the headers were set and validated.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Supplies one all-null row (clear headers) and one fully-populated row for
// setHTTPHeadersHeaders.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language",
            Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
    );
}
// Positive access-condition matrix for setHttpHeaders.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// Negative access-condition matrix for setHttpHeaders.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a nonexistent path fails.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
// Metadata written with setMetadata is returned verbatim by getProperties.
@Test
public void setMetadataMin() {
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Parameterized metadata round-trip (empty map and two-entry map).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Positive access-condition matrix for setMetadata.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// Negative access-condition matrix for setMetadata.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setMetadata on a nonexistent path fails.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Default read: verifies the full set of response headers a plain read returns (no metadata,
// no copy/lease/content-option headers) and that the body matches the uploaded bytes.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false)
        .flatMap(r -> {
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
            return FluxUtil.collectBytesInByteBufferStream(r.getValue());
        }))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reading a zero-length file completes with an empty buffer.
@Test
public void readEmptyFile() {
    // Use a namer-generated path like every other test; the previous hard-coded
    // "emptyFile" name could collide between concurrently running test instances.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
// Exercises the download retry path: MockRetryRangeResponsePolicy forces a retry and
// verifies the retried request carries the expected "bytes=2-6" range; the pipeline is
// expected to surface an IOException after retries are exhausted.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}
// Minimal read round-trip: body equals the uploaded default data.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Range reads return exactly the requested slice of the uploaded data.
// A null count means "from offset to end of file".
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Removed an unused ByteArrayOutputStream local that was never written to or read.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Ranges for readRange: whole file, prefix, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        Arguments.of(0L, null, DATA.getDefaultText()),
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}
// Positive access-condition matrix for read.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Negative access-condition matrix for read.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
// Requesting MD5 on a ranged read returns the Content-MD5 header for exactly that range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// readToFile without overwrite must fail with FileAlreadyExistsException (wrapped in
// UncheckedIOException) when the destination file already exists.
// FIX 1: removed a duplicated @Test annotation (@Test is not repeatable; two copies on
// one method do not compile).
// FIX 2: append/flush return cold Monos; added block() so the upload actually happens
// before the download is attempted (matches every sibling test).
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true succeeds over an existing destination and writes the
// expected content.
@Test
public void downloadFileExistsSucceeds() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFile creates the destination when it does not exist.
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFileWithResponse with CREATE/READ/WRITE open options creates the destination
// when it does not exist.
// FIX: the setup previously CREATED the file (copy-paste from the "exists" variant),
// contradicting the test's name and intent; it now deletes any leftover file, matching
// the setup of downloadFileDoesNotExist.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// readToFileWithResponse with CREATE + TRUNCATE_EXISTING overwrites an existing
// destination file.
@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// Parallel download of files of several sizes; verifies reported size and byte-for-byte
// content. NOTE(review): the @EnabledIf condition string below appears truncated
// (unterminated literal) — likely an extraction artifact; confirm against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
        null, null, false, null))
        .assertNext(r -> {
            assertEquals(fileSize, r.getValue().getFileSize());
        })
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// File sizes for downloadFile: tiny, block-aligned, just-over-8MB (note 1026, not 1024 —
// deliberately non-power-of-two), and 50 MB.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(
        20,
        16 * 1024 * 1024,
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    );
}
// Same as downloadFile but through a freshly built async client/file system to exercise
// the async buffer-copy path. NOTE(review): the @EnabledIf string appears truncated —
// extraction artifact; confirm against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
    String fileSystemName = generateFileSystemName();
    DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
        .endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
        .credential(getDataLakeCredential())
        .buildAsyncClient();
    DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
        .getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
        null, null, false, null)
        .map(Response::getValue))
        .assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// Ranged download to file: the downloaded slice matches the same slice of the source.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
        null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}
// Ranges: full file, all-but-first byte, interior pair, all-but-last byte, and a count
// larger than the file (should be clamped by the service).
private static Stream<FileRange> downloadFileRangeSupplier() {
    return Stream.of(
        new FileRange(0, DATA.getDefaultDataSizeLong()),
        new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(3, 2L),
        new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(0, 10 * 1024L)
    );
}
// A range starting past the end of the file fails with a service error.
@Test
public void downloadFileRangeFail() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
        new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
        null, false, null))
        .verifyError(DataLakeStorageException.class);
}
// A range with offset 0 and no count downloads the whole file.
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
// Positive access-condition matrix for readToFileWithResponse.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setLeaseId(setupPathLeaseCondition(fc, leaseID));
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, bro, false, null).block());
}
// Negative access-condition matrix: expects ConditionNotMet or the lease-mismatch error.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setLeaseId(leaseID);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
        null, bro, false, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
                "LeaseIdMismatchWithBlobOperation"));
        });
}
// ETag lock test: a pipeline policy mutates the blob after the first ranged response, so
// subsequent ranges must fail the implicit etag condition (412) and the partially written
// destination file must be cleaned up. The statement order here is load-bearing — kept
// byte-identical and only annotated. NOTE(review): the @EnabledIf string appears
// truncated — extraction artifact; confirm against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
    File file = getRandomFile(Constants.MB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    Files.deleteIfExists(outFile.toPath());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    AtomicInteger counter = new AtomicInteger();
    // Separate client used by the policy to overwrite the blob mid-download.
    DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
        .endpoint(fc.getPathUrl())
        .credential(getDataLakeCredential()))
        .buildFileAsyncClient();
    // After the first response, upload new content so the etag changes.
    HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
        if (counter.incrementAndGet() == 1) {
            return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
        }
        return Mono.just(response);
    });
    DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
        .addPolicy(policy)
        .endpoint(fc.getPathUrl())
        .credential(getDataLakeCredential()))
        .buildFileAsyncClient();
    // Small block size forces multiple ranged GETs so the mutation is observed.
    ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
    Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
    StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
        null, null, false, null))
        .verifyErrorSatisfies(ex -> {
            assertTrue(Exceptions.unwrapMultiple(ex).stream()
                .anyMatch(ex2 -> {
                    Throwable unwrapped = Exceptions.unwrap(ex2);
                    if (unwrapped instanceof DataLakeStorageException) {
                        return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
                    }
                    return false;
                }));
        });
    // Give async cleanup a moment, then verify the partial download was deleted.
    sleepIfRunningAgainstService(500);
    assertFalse(outFile.exists());
}
// Progress reporting via the deprecated ProgressReceiver: progress reaches exactly the
// file size, never exceeds it, and increases monotonically. NOTE(review): the @EnabledIf
// string appears truncated — extraction artifact; confirm against the repository.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    MockReceiver mockReceiver = new MockReceiver();
    fc.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setProgressReceiver(mockReceiver),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
    assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
    assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
    long prevCount = -1;
    for (long progress : mockReceiver.progresses) {
        assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
        prevCount = progress;
    }
}
// Records every progress callback for later inspection (deprecated API variant).
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
    List<Long> progresses = new ArrayList<>();
    @Override
    public void reportProgress(long bytesTransferred) {
        progresses.add(bytesTransferred);
    }
}
// Same progress checks as above via the current ProgressListener API. NOTE(review): the
// @EnabledIf string appears truncated — extraction artifact; confirm against the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    MockProgressListener mockListener = new MockProgressListener();
    fc.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setProgressListener(mockListener),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
    assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
    assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
    long prevCount = -1;
    for (long progress : mockListener.progresses) {
        assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
        prevCount = progress;
    }
}
// Records every progress callback for later inspection (current API).
private static final class MockProgressListener implements ProgressListener {
    List<Long> progresses = new ArrayList<>();
    @Override
    public void handleProgress(long progress) {
        progresses.add(progress);
    }
}
// Minimal rename within the same file system returns 201.
@Test
public void renameMin() {
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
        null, null, null), 201);
}
// After a rename, the destination client works (200) and the old path no longer exists.
@Test
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Rename across file systems: destination is reachable afterwards (200) and the source
// path no longer resolves.
// FIX: assertEquals arguments were swapped (actual, expected); JUnit convention — and
// every sibling test here — puts the expected value first, which matters for the failure
// message ("expected: <200>").
@Test
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a path that was never created should surface a service error.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
        null, null))
        .verifyError(DataLakeStorageException.class);
}
// Verifies rename handles percent-encoded characters in source and/or destination names.
// NOTE(review): with @CsvSource, empty cells yield null, so "generatePathName() + source"
// appends the literal string "null" in those rows — presumably intentional; confirm.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
        .flatMap(r -> {
            assertEquals(201, r.getStatusCode());
            return r.getValue().getPropertiesWithResponse(null);
        }))
        .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
        .verifyComplete();
}
// Rename should succeed when every *source* access condition (lease, ETag match,
// modified-since bounds) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
        null, null), 201);
}
// Rename should fail when any *source* access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // Using the real ETag as If-None-Match guarantees the condition fails.
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
        null, null))
        .verifyError(DataLakeStorageException.class);
}
// Rename should succeed when every *destination* access condition is satisfied;
// the destination file is pre-created so conditions can be evaluated against it.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(destFile, leaseID))
        .setIfMatch(setupPathMatchCondition(destFile, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
        drc, null), 201);
}
// Rename should fail when any *destination* access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    setupPathLeaseCondition(destFile, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // Real ETag in If-None-Match guarantees the condition fails.
        .setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
        .verifyError(DataLakeStorageException.class);
}
// A SAS token carrying move (and related) permissions should authorize a rename,
// and the destination should be reachable afterwards.
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client =
        getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient =
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // Guard against an NPE (which obscures the real failure) if the rename did not complete.
    assertNotNull(destClient);
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // JUnit convention: expected value first (original had the arguments swapped).
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Same as renameSasToken, but the SAS is passed with a leading '?' — the client
// should accept either form of the query string.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client =
        getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient =
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // Guard against an NPE (which obscures the real failure) if the rename did not complete.
    assertNotNull(destClient);
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // JUnit convention: expected value first (original had the arguments swapped).
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Minimal append at offset 0 should complete without throwing.
@Test
public void appendDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// Append should return 202 (Accepted) and the standard response headers,
// including the server-side-encryption flag.
@Test
public void appendData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Appending with an explicit content-MD5 lets the service validate the payload;
// the request should still succeed with 202.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    // Use an explicit charset: String.getBytes() without one uses the platform default,
    // which could diverge from the bytes the service receives on non-UTF-8 JVMs.
    byte[] md5 = MessageDigest.getInstance("MD5")
        .digest(DATA.getDefaultText().getBytes(StandardCharsets.UTF_8));
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Invalid append arguments (null body, or a declared length that does not match
// the actual data) should fail with the expected client-side exception.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
    StepVerifier.create(fc.append(is, 0, dataSize))
        .verifyError(exceptionType);
}
// Arguments: (data stream, declared size, expected exception).
// Declared size off by +/-1 must raise UnexpectedLengthException; null data an NPE.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    return Stream.of(
        Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
    );
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
        .verifyError(DataLakeStorageException.class);
}
// A null body must be rejected client-side before any request is sent.
@Test
public void appendDataNullBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(null, 0, 0))
        .verifyError(NullPointerException.class);
}
// Append succeeds (202) when the correct active lease id is supplied.
@Test
public void appendDataLease() {
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
        null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a wrong lease id must fail with 412 (Precondition Failed).
@Test
public void appendDataLeaseFail() {
    setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(412, e.getResponse().getStatusCode());
        });
}
// JUnit @DisabledIf condition: true when the targeted service version predates
// 2020-08-04 (the version that introduced lease actions on append).
private static boolean olderThan20200804ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE on append should take a fixed-duration lease as part of the
// append call; verified via the file's lease status/state/duration afterwards.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// LeaseAction.AUTO_RENEW: an existing lease supplied on append is renewed and the
// file remains leased with a fixed duration.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    // Acquire a 15-second lease up front so the append has something to renew.
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.AUTO_RENEW)
        .setLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// LeaseAction.RELEASE (with flush) should drop the lease as part of the append,
// leaving the file unlocked and available.
// NOTE(review): added the same service-version gate carried by the sibling
// lease-action tests (ACQUIRE/AUTO_RENEW/ACQUIRE_RELEASE) — lease actions on
// append require service version 2020-08-04+; confirm against CI matrix.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    // Acquire a 15-second lease up front so the append has something to release.
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE: lease is taken for the duration of the append and
// released once the (flushed) append completes, leaving the file unlocked.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// Appending to a path that was never created should fail with 404.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(404, e.getResponse().getStatusCode());
        });
}
// With a policy that injects transient failures, the client should retry the
// append transparently and the flushed file should contain the full payload.
@Test
public void appendDataRetryOnTransientFailure() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// setFlush(true) on append should commit the data in the same call, so a read
// immediately returns the appended bytes without a separate flush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Minimal BinaryData append at offset 0 should complete without throwing.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData append should return 202 with the standard response headers.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// BinaryData append with setFlush(true) should still return 202 and headers.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Minimal append-then-flush (with overwrite) should complete without throwing.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (and retainUncommittedData=false) should succeed.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
// Flush with retainUncommittedData=true (and close=false) should succeed.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
// Flushing with a position (4) that does not match the appended length should be
// rejected by the service.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
        null))
        .verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time should be persisted on the path; a null
// content type falls back to the service default "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    // Service substitutes the default content type when none was provided.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
// Flush should succeed (200) when every access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
// Flush should fail when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // Real ETag in If-None-Match guarantees the condition fails.
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency with flushAC: use the long-valued size accessor (same value,
    // avoids mixing int/long overloads between the paired tests).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created should surface a service error.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true))
        .verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false over already-flushed data must fail.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // Cannot flush to a location where data already exists without overwrite.
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// Building a client from raw, special-character, or percent-encoded names should
// yield the decoded path from getFilePath().
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, client.getFilePath());
}
// Bearer-token (AAD) credentials require HTTPS; the builder must reject an
// http:// endpoint with IllegalArgumentException.
@Test
public void builderBearerTokenValidation() {
    String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(endpoint)
        .buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Arguments: (file size, block size) — covers single-shot small files through a
// 101 MB multi-block upload with an explicit 4 MB block size.
private static Stream<Arguments> uploadFromFileSupplier() {
    return Stream.of(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
    );
}
// Metadata supplied to uploadFromFile should be stored on the path, and the
// uploaded bytes should match the local file.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Reading the local fixture failed; surface it as a test failure.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// Default uploadFromFile (no overwrite flag) must fail when the destination
// already exists — for both the shared client and a freshly created file.
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Register the second temp file for cleanup too — the original created it
    // inline and leaked it on disk.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// uploadFromFile with overwrite=true should succeed even when the destination
// already exists.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Register the second temp file for cleanup too — the original created it
    // inline and leaked it on disk.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
// Tracks the cumulative byte count from the deprecated ProgressReceiver callback;
// the last value received is the total reported for the upload.
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    private long reportedByteCount;
    @Override
    public void reportProgress(long bytesTransferred) {
        // bytesTransferred is cumulative, so keeping only the latest value is enough.
        this.reportedByteCount = bytesTransferred;
    }
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// ProgressListener counterpart of FileUploadReporter: remembers the latest
// cumulative byte count reported during an upload.
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;
    @Override
    public void handleProgress(long bytesTransferred) {
        // bytesTransferred is cumulative, so keeping only the latest value is enough.
        this.reportedByteCount = bytesTransferred;
    }
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Arguments: (file size, block size, buffer count) — mixes single-block and
// many-small-block uploads to exercise progress reporting shapes.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// uploadFromFile with explicit single-upload/block-size thresholds should still
// produce a file of exactly the source size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Arguments: (data size, max single-upload size, block size) — data larger than
// the single-upload threshold forces the chunked path, with/without block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Stream.of(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    );
}
// uploadFromFileWithResponse should return 200 with ETag/Last-Modified populated,
// and the stored file size should match the source.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Arguments: (buffer1, buffer2, buffer3, expected downloaded bytes) — an empty
// buffer is placed at each position in turn, plus the all-non-empty baseline.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    byte[] hello = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] space = " ".getBytes(StandardCharsets.UTF_8);
    byte[] world = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), ByteBuffer.allocate(0),
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.allocate(0), ByteBuffer.wrap(world),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.allocate(0), ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// Arguments: (data size, buffer size, buffer count) — varies total size, block
// size, and concurrency across the chunked upload path.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    );
}
// Asserts that `result` is exactly the concatenation of `buffers`: each source
// buffer is compared against the matching slice of `result` (by moving result's
// limit/position window), and no bytes may remain afterwards.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer buffer : buffers) {
        buffer.position(0);
        // Window the result to the next slice of the same length as this buffer.
        result.limit(result.position() + buffer.remaining());
        TestUtils.assertByteBuffersEqual(buffer, result);
        result.position(result.position() + buffer.remaining());
    }
    // All of result must have been consumed by the slices above.
    assertEquals(0, result.remaining());
}
// Counts progress callbacks from the deprecated ProgressReceiver, asserting each
// cumulative value lands on a block boundary.
// NOTE(review): uses the `assert` keyword, which only fires with -ea enabled —
// presumably the test JVM runs with assertions on; confirm.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;
    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// ProgressListener counterpart of Reporter: counts callbacks and asserts each
// cumulative value lands on a block boundary (requires -ea, like Reporter).
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;
    Listener(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Arguments: (data size, block size, buffer count) — from single-block to
// many-small-block uploads to vary the progress-callback cadence.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    );
}
// Same scenario as bufferedUploadWithReporter, but through the current
// ProgressListener API instead of the deprecated ProgressReceiver.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
// Small max-single-upload size forces the staged (chunked) upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a Flux made of several separately-sized buffers (sizes in MB from
// dataSizeList), then reads the file back and verifies the concatenated content
// round-trips intact. Write and read use distinct clients for the same path.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."))\u003b
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/**
 * Cases for bufferedUploadChunkedSource: (per-buffer sizes in MB, buffer size in MB,
 * number of buffers). Covers buffers smaller than, equal to, and larger than blocks.
 */
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    ).stream();
}
// Verifies the upload code path selection (single-shot vs. chunked) handles buffer
// lists whose sizes straddle the 4 MB single-upload threshold, then round-trips the
// content to confirm integrity.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing, but the source is a hot Flux
// (publish().autoConnect()), which cannot be re-subscribed — this exercises the
// upload path's single-subscription handling.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/**
 * Buffer-size lists (in bytes) that straddle the 4 MB single-upload threshold:
 * all-small, just-over, exactly-at (twice), and exactly-at (once).
 */
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB)
    ).stream();
}
// Hot-Flux upload through a pipeline that injects transient HTTP failures:
// verifies the client's retry logic still produces a byte-correct file even though
// the hot source cannot be replayed from the start.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Read back through a clean client (no failure injection) against the same URL.
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/**
 * Buffer-size lists for the transient-failure hot-Flux test; same shapes as
 * {@code bufferedUploadHandlePathingSupplier} minus the single-buffer case.
 */
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB)
    ).stream();
}
// InputStream-based upload through a failure-injecting pipeline: sizes are chosen
// below and just above the 2 MB block boundary to hit both the single-shot and
// chunked paths; the readback must match the original bytes exactly.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with NullPointerException (emitted through the
// reactive stream, not thrown synchronously).
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."))\u003b
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Uploads with a full set of HTTP headers (optionally including a client-computed
// MD5) and verifies each header round-trips through getProperties. A null content
// type must be served back as the service default "application/octet-stream".
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
// MD5 is only computed (and therefore only validated) when requested by the case.
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
/**
 * Cases for bufferedUploadHeaders: (data size, cache-control, disposition, encoding,
 * language, validate MD5, content type). Covers default and fully-populated headers
 * at both single-shot and chunked upload sizes.
 */
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    ).stream();
}
// Uploads with zero or two metadata pairs and verifies getProperties returns
// exactly the same map.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
// Tiny block size with 10 bytes of data keeps the upload fast while still chunked.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies the single-shot vs. chunked decision by counting appendWithResponse
// calls through an anonymous spy subclass; the expected count comes from the
// parameter source. Also checks the final file size.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Spy: intercepts every append, counts it, then delegates to the real client.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Upload with POSIX permissions "0777" and umask "0057" set on the options;
// only verifies the request succeeds and the size is correct (the effective
// permissions themselves are not asserted here).
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Access-condition happy path: every combination from the supplier (lease, ETag
// match, modified-since bounds) is satisfiable, so the upload must return 200.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"))\u003b
// setupPath*Condition helpers translate sentinel values into real lease ids/ETags.
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Access-condition failure path: each combination from the supplier is
// unsatisfiable (note ifMatch uses the raw value while ifNoneMatch is resolved to
// the real ETag — the inverse of the success test), so the service must return 412.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"))\u003b
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
})\u003b
}
/**
 * A buffered upload gated by a garbage lease id must fail with
 * {@link DataLakeStorageException}; parameterized over block-size/buffer-count
 * combinations that exercise the internal buffer pool.
 */
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName())
        .blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // A garbage lease id guarantees the service rejects the write.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier
        .create(fac.uploadWithResponse(Flux.just(getRandomData(10)), transferOptions, null, null, conditions))
        .verifyError(DataLakeStorageException.class);
}
// Without the overwrite flag, a second upload to the same path must fail.
// NOTE(review): the error type asserted is IllegalArgumentException — confirm this
// matches the client's documented no-overwrite behavior.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// With overwrite=true, uploading over an existing file must succeed.
// NOTE(review): fc.uploadFromFile(...) returns a Mono that is never subscribed
// inside assertDoesNotThrow, so only assembly (not execution) is checked there —
// confirm whether a .block() was intended.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/**
 * Uploads from a non-markable source (a Flux backed by an AsynchronousFileChannel)
 * and verifies the content round-trips byte-for-byte.
 *
 * Fix: the original opened the channel inline and never closed it — a resource
 * leak, since FluxUtil.readFile does not close the channel it reads from
 * (TODO confirm against the FluxUtil javadoc for the pinned azure-core version).
 * The channel is now held in try-with-resources and stays open until the blocking
 * upload completes, because the Flux reads the file lazily.
 */
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    try (AsynchronousFileChannel channel = AsynchronousFileChannel.open(file.toPath())) {
        Flux<ByteBuffer> stream = FluxUtil.readFile(channel, 0, file.length());
        // block() inside the try so the channel is still open while data is read.
        fc.upload(stream, null, true).block();
    }
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
// Upload from an InputStream without supplying a length: the client must buffer
// and determine the size itself; readback must equal the default test payload.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// A declared length that disagrees with the stream's actual size (zero, negative,
// one short, one long) must cause the upload to fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
/** Lengths that disagree with the default payload: zero, negative, one short, one long. */
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    return Arrays.asList(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1)
        .stream();
}
// Upload through a pipeline that injects a transient failure: the client's retry
// must absorb it and the readback must equal the original payload.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Upload via the BinaryData overload of uploadWithResponse; readback must match.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Upload via the BinaryData overload with overwrite=true over the existing fc file.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Encryption context set at upload time must be returned by getProperties.
// Requires service version 2021-04-10 or later.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload (an optional 4-column header plus two fixed data rows
// repeated numCopies times, using the serialization's record/column separators)
// and writes it to fc via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
// Header occupies the front of the buffer only when the serialization declares one.
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
/**
 * Writes a small JSON-like object to fc: an opening brace, then one
 * {@code \t"name<i>": "owner<i>",} line per copy (note the trailing comma before the
 * closing brace is intentional and preserved), via create/append/flush.
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int i = 0; i < numCopies; i++) {
        json.append('\t')
            .append("\"name").append(i).append("\": ")
            .append("\"owner").append(i).append("\",\n");
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    fc.flush(json.length(), true).block();
}
/**
 * Minimal quick-query round trip: "SELECT *" over an uploaded CSV must return the
 * exact bytes of the file, across payloads from 1 to 4000 row-pair copies.
 *
 * Fix: the accumulator previously wrote {@code piece.array()}, which dumps the
 * buffer's entire backing array and ignores its position/limit — appending stale
 * capacity bytes whenever a buffer is not exactly full. It now copies only the
 * readable region ({@code remaining()} bytes from the current position). This also
 * removes the IOException try/catch: ByteArrayOutputStream.write(byte[], int, int)
 * does not throw.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = fc.query(expression)
            .reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
                // Copy only [position, limit) — not the whole backing array.
                byte[] chunk = new byte[piece.remaining()];
                piece.get(chunk);
                outputStream.write(chunk, 0, chunk.length);
                return outputStream;
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Quick query with matching input/output delimited serializations across many
// record/column separator characters. When the input has headers but the output
// does not, the query result is the file minus its 16-byte header line.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
// The uploaded header line ("rn1,rn2,rn3,rn4" + separator) is 16 bytes.
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Cases: (record separator, column separator, headers in, headers out).
// Covers header combinations for the default separators, then a sweep of unusual
// record separators and column separators.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Quick query with a non-null escape character and field quote configured on both
// input and output serializations; the result must equal the raw file bytes.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// JSON-in/JSON-out quick query over the small JSON fixture.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
// Append ASCII 10 ('\n'): the query output ends with a record separator that the
// raw file does not have — presumably why this byte is added; confirm if changed.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
/** Cases for queryInputJson: (number of JSON entries, record separator). */
private static Stream<Arguments> queryInputJsonSupplier() {
    return Arrays.asList(
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    ).stream();
}
// CSV in, JSON out: the first CSV row must be transformed to a JSON object with
// positional keys "_1".."_4". Only the prefix of the result is compared.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
// JSON in, CSV out: the two-entry JSON fixture must flatten to one CSV row of its
// values ("owner0,owner1\n").
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
// CSV in, Arrow out with a one-field DECIMAL schema: only checks the query request
// itself does not throw (Arrow output content is not validated here).
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// Deliberately uploads '.'-separated data but queries it as ','-separated with a
// predicate on a nonexistent second column: the service reports non-fatal
// InvalidColumnOrdinal errors to the error consumer while the query completes.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
// blockLast() drains the result stream so every error callback fires.
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// Declares the uploaded CSV data as JSON input: the response itself arrives, but
// consuming the result stream must surface a fatal parse error as a
// RuntimeException from blockLast().
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
/**
 * Verifies the query progress consumer is invoked and that the final reported value
 * equals the full file size.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        // MockProgressReceiver is a member of this class — no qualification needed.
        MockProgressReceiver progress = new MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setProgressConsumer(progress);
        // Drain the result stream so every progress callback fires.
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(progress.progressList.contains(sizeofBlobToRead));
    });
}
/**
 * Over a large (512000-copy) CSV payload, verifies that successive progress
 * callbacks report monotonically non-decreasing byte counts.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    String expression = "SELECT * from BlobStorage";
    uploadCsv(ser, 512000);
    liveTestScenarioWithRetry(() -> {
        // MockProgressReceiver is a member of this class — no qualification needed.
        MockProgressReceiver progress = new MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setProgressConsumer(progress);
        // Drain the result stream so every progress callback fires.
        fc.queryWithResponse(options).block().getValue().blockLast();
        long previous = 0;
        for (long reported : progress.progressList) {
            assertTrue(reported >= previous, "Expected progress to be greater than or equal to previous progress.");
            previous = reported;
        }
    });
}
// Supplying an unrecognized FileQuerySerialization subtype as either input or
// output must fail with IllegalArgumentException (thrown synchronously here).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
// Arrow is output-only for quick query: using it as the input serialization must
// error with IllegalArgumentException (emitted through the reactive stream).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// True when the targeted service version predates 2020-10-02; used by @DisabledIf
// to skip tests that need features introduced in that version (e.g. Parquet query).
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Parquet is input-only for quick query: using it as the output serialization must
// error with IllegalArgumentException (emitted through the reactive stream).
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Querying a path that was never created must surface DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Point the client at a path that does not exist on the service.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}
// A query must succeed when every supplied access condition (lease, ETag,
// modified-since bounds) matches the file's actual state.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setupPath*Condition presumably swaps the RECEIVED_* placeholders from the
    // supplier for live values fetched from the service — see modifiedMatchAndLeaseIdSupplier.
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
/**
 * Runs a test scenario, retrying against live services to tolerate transient flakiness.
 * <p>
 * In playback/record modes results are deterministic, so the scenario runs exactly once
 * and any failure propagates immediately. In live mode the scenario is attempted up to
 * five times with a pause between attempts.
 * <p>
 * Bug fix: the previous implementation exited the retry loop silently after the final
 * failed attempt, which let a consistently failing scenario pass. The last failure is
 * now rethrown once the attempts are exhausted.
 *
 * @param runnable the scenario to execute; expected to throw on failure
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    final int maxAttempts = 5;
    RuntimeException lastFailure = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            // Runnable.run() can only throw unchecked exceptions, so RuntimeException
            // covers everything the old catch (Exception) could actually see here.
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure;
}
// A query must fail with DataLakeStorageException when any supplied access
// condition does not match the file's state.
// NOTE(review): unlike queryAC this is not wrapped in liveTestScenarioWithRetry —
// confirm whether that is intentional.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    StepVerifier.create(fc.queryWithResponse(
        new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
        .verifyError(DataLakeStorageException.class);
}
// Scheduling deletion must set (or, for empty/null options, leave unset) the
// file's expiry time as reported by getProperties.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    // hasExpiry encodes whether the supplied options should have produced an expiry.
    assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Pairs of (deletion options, whether an expiry should be set afterwards).
private static Stream<Arguments> scheduleDeletionSupplier() {
    return Stream.of(
        // Relative offsets (from creation time or now) produce an expiry.
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
        // Empty or absent options leave the file without an expiry.
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false)
    );
}
// True when the targeted service version predates 2019-12-12; used by @DisabledIf
// guards on tests that need features introduced in that release.
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// An absolute expiry time set via scheduleDeletion must be reported back by
// getProperties (compared at whole-second precision).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
    OffsetDateTime now = testResourceNamer.now();
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    // Truncated to seconds on the expected side so the comparison matches the
    // precision the service returns.
    assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Scheduling deletion on a file that does not exist must surface the service error.
@Test
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
/**
 * Test receiver that records every bytes-scanned progress value reported by a
 * query, in arrival order, so tests can assert the sequence is monotonic.
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    // final: the list identity must not change mid-test; contents stay mutable.
    final List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Test receiver that asserts every reported query error is non-fatal and of the
 * expected type, counting how many were seen.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    // final: the expected error name is fixed for the receiver's lifetime.
    final String expectedType;
    // Number of matching (non-fatal, expected-type) errors observed so far.
    int numErrors;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// A serialization type unknown to the client; used by query tests to provoke the
// IllegalArgumentException rejection paths.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// Upload without the overwrite flag must refuse to replace an existing file.
@Test
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
// Upload with overwrite=true replaces the existing file; the read-back bytes
// must match what was uploaded.
@Test
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition below appears truncated (unterminated
// string literal) — recover the original fully-qualified condition-method reference.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
    // A 20 MB payload with a 1 MB single-upload cutoff forces the chunked upload path.
    ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
    ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
    assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
        .setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition below appears truncated (unterminated
// string literal) — recover the original fully-qualified condition-method reference.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Counts how many append calls the upload machinery issues.
    AtomicInteger numAppendsCounter = new AtomicInteger(0);
    // Intercepting subclass: every append increments the counter, then delegates.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            numAppendsCounter.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
    ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxSingleUploadSizeLong(singleUploadSize);
    StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
        .setParallelTransferOptions(pto)))
        .expectNextCount(1)
        .verifyComplete();
    // The whole payload must have landed on the service...
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    // ...in exactly the expected number of append calls.
    assertEquals(numAppends, numAppendsCounter.get());
}
// Rows: (dataSize, maxSingleUploadSize, blockSize, expected number of append calls).
// null sizes fall back to the client's defaults.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    return Stream.of(
        Arguments.of((100 * Constants.MB) - 1, null, null, 1),
        // One byte over the single-shot size: chunked using a 4 MB block size.
        Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5)
    );
}
// uploadWithResponse must return the new file's info with a populated ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
    assertNotNull(fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
        .getValue().getETag());
}
// A per-call pipeline policy that overrides the service version header must apply
// to every request made through the rebuilt client.
@Test
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    // Both calls must carry the overridden x-ms-version value.
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
} |
Having a `StepVerifer` within a `StepVerifier` isn't always safe. I'd recommend a small change to this test: ```java StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(), null, null, null) .flatMap(r -> r.getValue().getPropertiesWithResponse(null))) .assertNext(p -> assertEquals(p.getStatusCode(), 200)) .verifyComplete(); ``` This will perform the same logic in renaming the file system and then trying to get properties using the new file system returned. | public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
} | .verifyComplete(); | public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
} |
The `r.getValue().getPropertiesWithResponse(null)` is never subscribed to and will never run. I'd change this test slightly ```java StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null) .flatMap(r -> { assertEquals(201, r.getStatusCode()); return r.getValue().getPropertiesWithResponse(null); })) .assertNext(piece -> assertEquals(200, piece.getStatusCode())) .verifyComplete(); ``` Don't be afraid to put assertions into map and flapMap operators if the test requires chaining multiple API calls like this. | public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
r.getValue().getPropertiesWithResponse(null).flatMap(piece -> {
assertEquals(200, piece.getStatusCode());
return null;
});
})
.verifyComplete();
} | .verifyComplete(); | public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
} |
This is missing a `verify` call | public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
} | .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext())); | public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
// Client for the file created fresh in setup(); individual tests may reassign it.
private DataLakeFileAsyncClient fc;
// Local temp files produced by tests; deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwx r-x r-- : owner full access, group read+execute, other read-only.
private static final PathPermissions PERMISSIONS = new PathPermissions()
    .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
    .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
    .setOther(new RolePermissions().setReadPermission(true));
// NOTE(review): null constants — presumably placeholders kept for parity with a
// sibling test suite; confirm they are still needed.
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
    PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
    // Every test starts with a freshly created file on the service.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
    // Best-effort removal of local temp files; failures are intentionally ignored.
    createdFiles.forEach(File::delete);
}
// Creating a new file with no options must emit non-null path info and complete.
@Test
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        // Idiom fix: assertNotNull replaces assertNotEquals(null, r) — same check,
        // clearer intent and a better failure message.
        .assertNext(r -> assertNotNull(r))
        .verifyComplete();
}
// Create with all-null arguments must return 201 and pass basic header validation.
@Test
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// An unmatchable If-Match precondition must make the create fail with a service error.
@Test
public void createError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
        .verifyError(DataLakeStorageException.class);
}
// create(overwrite=false) on an already-existing path must fail.
@Test
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
// exists() reports true for a file that was just created.
@Test
public void exists() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
// exists() reports false for a path that was never created.
@Test
public void doesNotExist() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
// An encryption context supplied at creation must round-trip through
// getProperties, readWithResponse, and listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // Bug fix: this StepVerifier previously had no terminal verify*() call, so the
    // read never executed and its assertion was silently skipped.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // First emission is the directory (skipped), second is the file under test.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Creating with no explicit owner/group must leave both as "$superuser".
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // Bug fix: the create call returned a cold Mono that was never subscribed, so
    // the file was never (re)created before the ACL assertion. Block until done.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// Bug fix: a duplicated @DisabledIf/@Test annotation pair preceded this method
// (likely left over from a removed sibling test). @Test is not repeatable, so the
// duplicate would not compile; only the 2020-12-06 guard this test needs is kept.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            // Only the first two entries (user, group) are asserted, matching
            // createOptionsWithACL above.
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}
// Verifies create-if-not-exists honors an explicit owner and group.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String ownerName = testResourceNamer.randomUuid();
    String groupName = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(ownerName, r.getOwner());
            assertEquals(groupName, r.getGroup());
        })
        .verifyComplete();
}

// With null owner/group the service falls back to the "$superuser" defaults.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}

// Create-if-not-exists accepts a full set of HTTP headers and returns 201.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

// Metadata passed via create options is retrievable through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            // Containment check rather than equality: the service may add its own entries.
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}

// Permissions 0777 masked by umask 0057 should yield rwx-w---- on the created file.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}

// A proposed lease id together with a lease duration is accepted at creation.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

// A proposed lease id WITHOUT a lease duration is rejected — presumably the service
// requires both together (the passing sibling test above sets a duration); TODO confirm.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}

// After creating with a 15s lease, the file reports a locked, leased, fixed-duration state.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}

// Scheduled-deletion options supplied at creation are accepted (201).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

// A relative time-to-expire (6 days) yields expiresOn = creationTime + 6 days.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
        .verifyComplete();
}
// A minimal delete of an existing file returns HTTP 200.
@Test
public void deleteMin() {
    assertAsyncResponseStatusCode(fc.deleteWithResponse(
        null, null, null), 200);
}

// After deletion, a properties request fails with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
    fc.deleteWithResponse(null, null, null).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}

// Delete succeeds (200) when all access conditions (lease, ETag, modified-since) pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}

// Delete fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.deleteWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// Deleting an existing file via deleteIfExists emits {@code true} and then completes.
@Test
public void deleteIfExists() {
    StepVerifier.create(fc.deleteIfExists())
        .assertNext(wasDeleted -> assertTrue(wasDeleted))
        .verifyComplete();
}
// Minimal deleteIfExists on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}

// Once deleted, fetching properties errors out.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}

// A second deleteIfExists on the same (now-absent) file returns 404 rather than throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}

// deleteIfExists succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}

// deleteIfExists fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
    StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
// setPermissions returns updated ETag and last-modified info.
@Test
public void setPermissionsMin() {
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}

// The WithResponse overload reports 200 on success.
@Test
public void setPermissionsWithResponse() {
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}

// setPermissions succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}

// setPermissions fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}

// setPermissions on a file that was never created errors out.
@Test
public void setPermissionsError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}

// setAccessControlList returns updated ETag and last-modified info.
@Test
public void setACLMin() {
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}

// The WithResponse overload reports 200 on success.
@Test
public void setACLWithResponse() {
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}

// setAccessControlList succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
        200);
}

// setAccessControlList fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}

// setAccessControlList on a file that was never created errors out.
@Test
public void setACLError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}

// @DisabledIf guard: recursive ACL operations require service version 2020-02-10+.
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}

// Recursive set-ACL on a single file changes exactly one file and nothing else.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}

// Recursive update-ACL on a single file changes exactly one file and nothing else.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}

// Recursive remove-ACL on a single file changes exactly one file and nothing else.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
        "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
    StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// getAccessControl returns a fully populated ACL/owner/group result.
@Test
public void getAccessControlMin() {
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertNotNull(r.getAccessControlList());
            assertNotNull(r.getPermissions());
            assertNotNull(r.getOwner());
            assertNotNull(r.getGroup());
        })
        .verifyComplete();
}

// The WithResponse overload reports 200 (UPN resolution disabled).
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}

// Requesting user-principal-name resolution also succeeds.
@Test
public void getAccessControlReturnUpn() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}

// getAccessControl succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, drc, null), 200);
}

// getAccessControl fails when an access condition is violated.
// The garbage-lease case is skipped — presumably the getAccessControl endpoint does not
// validate lease ids the way write operations do; TODO confirm.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
        .verifyError(DataLakeStorageException.class);
}

// Exhaustively checks the default property/header set of a freshly created file:
// populated timestamps/ETag/lease state, no copy/encryption/metadata fields, HOT tier.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
// A bare properties fetch on an existing file returns 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}

// getProperties succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}

// getProperties fails when any access condition is violated.
// NOTE(review): unlike the other *ACFail tests this one passes the RESULT of
// setupPathLeaseCondition as the lease id rather than the raw leaseID — verify that
// is intentional for this endpoint.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}

// getProperties on a never-created file fails with BlobNotFound in the message.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}

// Passing null headers is accepted and still returns the standard response headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}

// Changing only the content type (other headers copied from current properties)
// is reflected by a subsequent getProperties call.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Verifies setHttpHeaders round-trips every HTTP header value (validated via getProperties).
// Fix: append(...) and flush(...) returned Monos that were never subscribed, so the data
// upload never executed; .block() (as used by every sibling test) makes the writes happen
// before the headers are set and validated.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Argument source for setHTTPHeadersHeaders: all-null headers and a fully populated set.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language",
            Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
    );
}

// setHttpHeaders succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}

// setHttpHeaders fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

// setHttpHeaders on a never-created file errors out.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
// Setting a single metadata entry is reflected by a subsequent getProperties call.
@Test
public void setMetadataMin() {
    Map<String, String> updated = new HashMap<>();
    updated.put("foo", "bar");
    fc.setMetadata(updated).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(updated, props.getMetadata()))
        .verifyComplete();
}
// setMetadata with empty or populated maps returns the expected status and round-trips the map.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// setMetadata succeeds when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}

// setMetadata fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

// setMetadata on a never-created file errors out.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Full read with no options: validates the body and the complete default header set.
// NOTE(review): the body assertion runs inside r.getValue().subscribe(...), i.e.
// asynchronously — a failing assertArraysEqual there may not fail the test; consider
// collecting the body synchronously (FluxUtil.collectBytesInByteBufferStream) instead.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> {
            r.getValue().subscribe(piece -> {
                TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
            });
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
        })
        .verifyComplete();
}

// Reading a zero-length file yields an empty buffer.
// NOTE(review): the path name "emptyFile" is hard-coded rather than generatePathName() —
// verify this cannot collide with parallel test runs against the same file system.
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}

// With a mock policy that corrupts range responses, retried ranged reads surface an IOException.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
        .assertNext(r -> {
            StepVerifier.create(r.getValue())
                .verifyErrorSatisfies(p -> {
                    assertInstanceOf(IOException.class, p);
                });
        })
        .verifyComplete();
}
// Minimal read round-trips the uploaded default data.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}

// Ranged reads return exactly the requested slice of the data.
// NOTE(review): the assertion runs inside r.getValue().subscribe(...) — asynchronous;
// a failure there may not propagate to the test. Also assumes the body arrives in a
// single buffer per subscription — TODO confirm.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    ByteArrayOutputStream readData = new ByteArrayOutputStream();
    StepVerifier.create(fc.readWithResponse(range, null, null, false))
        .assertNext(r -> {
            r.getValue().subscribe(piece -> {
                try {
                    readData.write(piece.array());
                    assertEquals(expectedData, readData.toString());
                } catch (IOException ex) {
                    throw new UncheckedIOException(ex);
                }
            });
        })
        .verifyComplete();
}

// Argument source for readRange: full read, head slice, and a mid-file slice.
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        Arguments.of(0L, null, DATA.getDefaultText()),
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}

// read succeeds (200) when all access conditions pass.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}

// read fails when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}

// With rangeGetContentMd5=true, the response carries a Content-MD5 matching the
// base64-encoded MD5 of the requested slice.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            // Base64 is ASCII, so the charset-default getBytes() is safe here.
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}

// A read that hits up to 5 injected failures still completes with the full data
// under the default retry behavior.
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> {
            try {
                downloadData.write(r);
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            assertEquals(DATA.getDefaultText(), downloadData.toString());
        })
        .verifyComplete();
}
// readToFile must refuse to overwrite an existing destination file by default,
// surfacing FileAlreadyExistsException wrapped in UncheckedIOException.
// Fix: append(...) and flush(...) returned Monos that were never subscribed (missing
// .block(), unlike every sibling download test), so the source data was never written.
// The overwrite check still fired, but the fixture was not in the intended state.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true must succeed even though the local destination file
// already exists, and the downloaded bytes must match the uploaded default text.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile must create the local destination file when it does not exist and write
// the downloaded content into it.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFileWithResponse with explicit OpenOptions (CREATE/READ/WRITE, no TRUNCATE)
// succeeds against a pre-existing local file.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Same as above but includes TRUNCATE_EXISTING so a pre-existing local file is
// truncated before the download is written.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the file (missing the '#method")' suffix) — confirm against the original source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// Uploads a random file of the given size, downloads it with a 4 MB block size, and
// verifies both the on-disk bytes and the reported file size.
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes exercised by the download tests: tiny, exactly 16 MB, an odd size just
// over 8 MB (note 1026, not 1024 — matches the sizes used by the progress tests), 50 MB.
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
// NOTE(review): truncated @EnabledIf condition string here as well — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// Same round-trip as downloadFile but through a freshly built service/file-system
// client pair, verifying the buffered-copy read path end to end.
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads only the requested range and compares the written bytes against the same
// offset/count window of the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges covering: full file, skip-first-byte, small mid-file slice, all-but-last-byte,
// and a count extending past end-of-file.
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range whose offset starts past end-of-file must fail with DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A FileRange with a null count downloads from the offset to end-of-file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFileWithResponse must succeed when all access conditions (modified-since,
// unmodified-since, if-match, if-none-match, lease) are satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// With a failing access condition the download must error with either ConditionNotMet
// or LeaseIdMismatchWithBlobOperation, depending on which condition failed.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy —
// confirm against the original source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Verifies that a parallel download is ETag-locked: a pipeline policy overwrites the
// remote file after the first response, so a later chunk request must fail with 412
// (precondition failed) and the partially written local file must be cleaned up.
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the first response completes, overwrite the blob so subsequent ranged
// requests see a changed ETag.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple ranged requests so the ETag change is observed.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
// Parallel chunks may fail after the terminal error; suppress dropped-error noise.
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give async cleanup time to delete the partial file before asserting.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
// NOTE(review): truncated @EnabledIf condition string — confirm against the original.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Deprecated ProgressReceiver API: progress reports must reach exactly fileSize, never
// exceed it, and be monotonically non-decreasing.
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback for later inspection (deprecated receiver API).
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// NOTE(review): truncated @EnabledIf condition string — confirm against the original.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Same assertions as downloadFileProgressReceiver, but via the replacement
// ProgressListener API.
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback for later inspection (current listener API).
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename within the same file system returns 201 Created.
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
// After a rename the destination client must be usable (getProperties returns 200)
// and the source path must no longer exist (getProperties errors).
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Rename into a different (freshly created) file system; destination must be readable
// and the source must be gone.
// NOTE(review): block() may return null if the Mono completes empty; the result is
// dereferenced unchecked on the next line — consider an assertNotNull.
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming a path that was never created must fail with DataLakeStorageException.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Renames paths whose source/destination suffixes contain URL-encoded characters and
// verifies the rename succeeds (201) and the destination is readable (200).
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
        null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // BUG FIX: the original chained flatMap(...) on the properties call but
            // never subscribed to it (and returned null from the lambda), so the
            // 200-status assertion never executed. Use the nested StepVerifier
            // pattern that renameWithResponse uses.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
// Rename succeeds (201) when all access conditions on the SOURCE path are satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when an access condition on the SOURCE path cannot be met.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds (201) when all access conditions on the DESTINATION path are
// satisfiable (destination is pre-created so its ETag/lease can be set up).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename fails when an access condition on the DESTINATION path cannot be met.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename must work when authenticated with a file-system SAS token granting the
// required permissions (read/move/write/create/add/delete).
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?', which the client
// must tolerate.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Minimal append of the default payload at offset 0 must not throw.
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse returns 202 with the standard request-id/version/date headers and
// server-side encryption confirmed.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a client-computed MD5 of the payload is accepted (202).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null body or a declared length that disagrees with the actual flux length must error
// with the exception type given by the supplier.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null flux -> NPE; declared size one too large/small -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending an empty body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// Appending a null body fails client-side with NullPointerException.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// Append succeeds (202) when the correct active lease id is supplied.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a wrong lease id against a leased path fails with 412.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Guard for @DisabledIf: lease-action support requires service version 2020-08-04+.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE on append must leave the path locked with a fixed-duration lease.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW on append keeps an existing fixed lease locked.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE with flush=true must leave the path unlocked and available.
// NOTE(review): unlike its siblings this test lacks @DisabledIf("olderThan20200804ServiceVersion")
// even though it also uses lease actions — confirm whether that is intentional.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE acquires a lease for the append and releases it on flush,
// leaving the path unlocked afterwards.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created fails with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// Transient failures injected by TransientFailureInjectingHttpPipelinePolicy must be
// retried so the appended data still round-trips correctly.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Append with flush=true commits in one call: 202 with standard headers, and the data
// is immediately readable without a separate flush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Minimal BinaryData append at offset 0 must not throw.
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData appendWithResponse returns 202 with the standard headers.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// BinaryData append with flush=true returns 202 with the standard headers.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Minimal flush after a single append must not throw.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (retainUncommittedData=false) succeeds.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Flush with retainUncommittedData=true (close=false) succeeds.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing at a position (4) that does not match the appended data length is rejected
// by the service.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers set at flush time must be reflected by getProperties; a null content
// type defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all access conditions are satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush must fail with DataLakeStorageException when an access condition cannot be met.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency: use the long accessor like flushAC does (the int-returning
    // getDefaultDataSize() was only being widened implicitly; behavior is identical).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created surfaces a DataLakeStorageException.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// flush(size, overwrite=true) succeeds over existing data; a second flush with
// overwrite=false against already-flushed data must fail.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Building a client from a raw or percent-encoded name yields the decoded file path,
// including special characters and non-ASCII names.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// The path client builder must reject bearer-token credentials over plain HTTP.
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
// Round-trip: upload a random local file of the given size, download it back, and
// compare bytes. blockSize == null exercises the client's default chunking.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy
// (no closing quote/paren) — verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Test data for uploadFromFile: (fileSize, blockSize); a null blockSize lets the
// client choose its default chunking.
private static Stream<Arguments> uploadFromFileSupplier() {
    Arguments tiny = Arguments.of(10, null);
    Arguments small = Arguments.of(10 * Constants.KB, null);
    Arguments medium = Arguments.of(50 * Constants.MB, null);
    Arguments large = Arguments.of(101 * Constants.MB, 4L * 1024 * 1024);
    return Stream.of(tiny, small, medium, large);
}
// Metadata supplied to uploadFromFile is persisted on the path, and the uploaded
// content matches the local file's bytes.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// uploadFromFile without an overwrite flag must fail against an existing path.
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
// FIX: the second temp file was previously created inline and never registered
// for cleanup (no deleteOnExit, not in createdFiles) — track it like the first.
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
// uploadFromFile with overwrite=true must succeed against an existing path.
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
// FIX: the second temp file was previously created inline and never registered
// for cleanup — track it like the first.
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
.verifyComplete();
}
/*
 * Progress receiver used by upload-from-file tests. Stores the most recent
 * cumulative byte count instead of counting callbacks, because upload-from-file
 * hooks into the disk-read stream, which uses a hard-coded read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    private long totalBytesSeen;

    @Override
    public void reportProgress(long bytesTransferred) {
        totalBytesSeen = bytesTransferred;
    }

    long getReportedByteCount() {
        return totalBytesSeen;
    }
}
// Non-deprecated counterpart of FileUploadReporter: records the latest cumulative
// byte count delivered through the ProgressListener callback.
private static final class FileUploadListener implements ProgressListener {
    private long totalBytesSeen;

    @Override
    public void handleProgress(long bytesTransferred) {
        totalBytesSeen = bytesTransferred;
    }

    long getReportedByteCount() {
        return totalBytesSeen;
    }
}
// Uploading a file with a (deprecated) ProgressReceiver reports the full byte count.
// setMaxSingleUploadSizeLong(blockSize - 1) forces the chunked upload path.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Test data for progress-reporting uploads: (size, blockSize, bufferCount).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    int mb = Constants.MB;
    return Stream.of(
        Arguments.of(10 * mb, 10L * mb, 8),
        Arguments.of(20 * mb, (long) mb, 5),
        Arguments.of(10 * mb, 5L * mb, 2),
        Arguments.of(10 * mb, 10L * Constants.KB, 100)
    );
}
// Same as uploadFromFileReporter but using the non-deprecated ProgressListener API.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Uploads with explicit single-shot/block size limits; only the resulting file size
// is verified here.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// (dataSize, singleUploadSize, blockSize); a null blockSize exercises the default.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Arguments defaultBlockSize = Arguments.of(100, 50L, null);
    Arguments explicitBlockSize = Arguments.of(100, 50L, 20L);
    return Stream.of(defaultBlockSize, explicitBlockSize);
}
// uploadFromFileWithResponse returns 200 with ETag/Last-Modified populated, and the
// uploaded path has the expected size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
// FIX: without a terminal verify step the StepVerifier never subscribes,
// so the file-size assertion above was never actually executed.
.verifyComplete();
}
// Buffered upload of a zero-length flux without overwrite fails against the service.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
// Empty ByteBuffers interleaved in the source flux contribute no bytes; the download
// must equal the concatenation of the non-empty buffers.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Rows: three source buffers plus the bytes expected after uploading them in order
// (empty buffers contribute nothing to the result).
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer empty = ByteBuffer.allocate(0);
    byte[] hello = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] world = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] space = " ".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), empty,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), empty, ByteBuffer.wrap(world),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(empty, ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            " world!".getBytes(StandardCharsets.UTF_8))
    );
}
// Parallel buffered upload of random data with the given buffer size and concurrency;
// content is read back and compared unless the payload is 100 MB or more.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Skip the byte-for-byte comparison for very large payloads to keep the test fast.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// (dataSize, bufferSize, numBuffs) combinations for parallel buffered uploads.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    int mb = Constants.MB;
    return Stream.of(
        Arguments.of(35 * mb, 5L * mb, 2),
        Arguments.of(35 * mb, 5L * mb, 5),
        Arguments.of(100 * mb, 10L * mb, 2),
        Arguments.of(100 * mb, 10L * mb, 5),
        Arguments.of(10 * mb, (long) mb, 10),
        Arguments.of(50 * mb, 10L * mb, 2),
        Arguments.of(10 * mb, 2L * mb, 4),
        Arguments.of(10 * mb, 3L * mb, 3)
    );
}
// Asserts that the concatenation of `buffers`, in order, is byte-for-byte equal to
// `result` and consumes it completely. Mutates the positions/limits of all buffers.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (int i = 0; i < buffers.size(); i++) {
        ByteBuffer expected = buffers.get(i);
        expected.position(0);
        // Window `result` down to exactly this buffer's length before comparing.
        result.limit(result.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, result);
        result.position(result.position() + expected.remaining());
    }
    assertEquals(0, result.remaining());
}
// Counts progress callbacks and (when JVM assertions are enabled) checks that every
// reported byte count is a multiple of the configured block size.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // NOTE(review): this `assert` is a no-op unless the JVM runs with -ea.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Non-deprecated counterpart of Reporter: counts ProgressListener callbacks and
// (under -ea) checks block-size alignment of each report.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // NOTE(review): this `assert` is a no-op unless the JVM runs with -ea.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Buffered upload with a (deprecated) ProgressReceiver: expects at least one report
// per full block transferred.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// (size, blockSize, bufferCount) combinations for progress-tracking buffered uploads.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    int mb = Constants.MB;
    return Stream.of(
        Arguments.of(10 * mb, 10L * mb, 8),
        Arguments.of(20 * mb, (long) mb, 5),
        Arguments.of(10 * mb, 5L * mb, 2),
        Arguments.of(10 * mb, 512L * Constants.KB, 20)
    );
}
// Same as bufferedUploadWithReporter but using the non-deprecated ProgressListener.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Buffered upload from a flux of variable-size chunks (sizes in MB); the download
// must equal the concatenation of all chunks.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Rows: chunk sizes in MB, buffer size in MB, and upload concurrency.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    List<Integer> twoEqualChunks = Arrays.asList(7, 7);
    List<Integer> manySmallChunks = Arrays.asList(3, 3, 3, 3, 3, 3, 3);
    List<Integer> exactFitChunks = Arrays.asList(10, 10);
    List<Integer> unevenChunks = Arrays.asList(50, 51, 49);
    return Stream.of(
        Arguments.of(twoEqualChunks, 10L, 2),
        Arguments.of(manySmallChunks, 10L, 2),
        Arguments.of(exactFitChunks, 10L, 2),
        Arguments.of(unevenChunks, 10L, 2)
    );
}
// Buffered upload must take the correct code path (single-shot vs chunked) for chunk
// lists straddling the 4 MB single-upload threshold; round-trips and compares bytes.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing but the source is a hot flux
// (publish().autoConnect()), which cannot be re-subscribed/replayed.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists around the 4 MB single-shot boundary (below, just above, at, exact).
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<Integer> smallChunks = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoAtThreshold = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    List<Integer> singleAtThreshold = Collections.singletonList(4 * Constants.MB);
    return Stream.of(smallChunks, justOverThreshold, twoAtThreshold, singleAtThreshold);
}
// Hot-flux buffered upload through a pipeline that injects transient failures; the
// retried upload must still produce the full concatenated content.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Read back with a clean (non-failing) client against the same URL.
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists for the transient-failure hot-flux test (no single-chunk case).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<Integer> smallChunks = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoAtThreshold = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    return Stream.of(smallChunks, justOverThreshold, twoAtThreshold);
}
// InputStream-based upload through a transient-failure pipeline: retries must yield
// the complete content for both single-shot (11110 B) and chunked (>2 MB) paths.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// Passing a null data flux to upload must surface a NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// HTTP headers (including an optional Content-MD5 computed over the payload) set at
// buffered-upload time are persisted on the path.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Rows: (dataSize, cacheControl, contentDisposition, contentEncoding, contentLanguage,
// validateContentMD5, contentType).
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    int largeSize = 6 * Constants.MB;
    return Stream.of(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(largeSize, null, null, null, null, false, null),
        Arguments.of(largeSize, "control", "disposition", "encoding", "language", true, "type")
    );
}
// Metadata set at buffered-upload time is persisted on the path; null keys mean
// "no metadata entry" thanks to nullValues = "null".
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies chunking decisions by counting appendWithResponse invocations through an
// anonymous subclass that intercepts the package-private append method.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// "Spy" client: counts each append call, then delegates to the real implementation.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Upload with POSIX permissions and umask options succeeds and produces the expected size.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Buffered upload succeeds (200) when every matching access condition is satisfied.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Buffered upload fails with 412 (precondition failed) when an access condition is
// deliberately unsatisfiable.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Upload with a garbage lease ID must fail even when the buffer pool uses multiple
// buffers (regression coverage for buffer-pool locking).
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// A second buffered upload without an overwrite flag must fail with
// IllegalArgumentException once the path already exists.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// uploadFromFile with overwrite=true must succeed against an existing path.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
// FIX: the original lambda built the Mono but never subscribed to it, so the
// upload never ran and assertDoesNotThrow was vacuous; block() executes it
// (matching uploadFromFileOverwrite above).
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
// FIX: track the second temp file for cleanup instead of leaking it.
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
.verifyComplete();
}
// Upload from a non-replayable (non-markable) file-channel flux, download to a file,
// and compare contents.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Upload from an InputStream without declaring a length; content must round-trip.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// A declared length that disagrees with the stream's actual size must fail the upload.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Lengths that disagree with the actual payload: zero, negative, and off-by-one.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
// Upload through a transient-failure-injecting pipeline must succeed after retries
// and produce the full content.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Upload from BinaryData via uploadWithResponse; content must round-trip.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload from the given delimited serialization and uploads it to fc.
// The payload is numCopies back-to-back copies of a fixed two-record block,
// optionally preceded by a single header row when s.isHeadersPresent().
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
// Header row: rn1<sep>rn2<sep>rn3<sep>rn4 followed by the record separator.
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
// One copy = two records: 100,200,300,400 and 300,400,500,600.
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
// Lay the copies out contiguously after the optional header.
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
// Recreate the file, append the whole payload at offset 0, then flush to commit it.
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Uploads a small JSON-like object with numCopies "nameN": "ownerN" entries to fc.
// NOTE(review): every entry, including the last, is followed by ",\n", so the payload
// has a trailing comma before '}' — presumably tolerated by the query service; confirm
// before reusing this helper elsewhere.
private void uploadSmallJson(int numCopies) {
StringBuilder b = new StringBuilder();
b.append("{\n");
for (int i = 0; i < numCopies; i++) {
b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
}
b.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(b.toString()), 0).block();
// b.length() (char count) equals the byte count here because the content is ASCII-only.
fc.flush(b.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
// Minimal query test: SELECT * over headerless CSV must return the file content unchanged.
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
// Baseline: the raw file bytes as returned by a plain read.
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
// Accumulate every query result chunk into one stream for comparison.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
// SELECT * over JSON input/output must round-trip the uploaded document.
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
// Baseline: raw file bytes collected from a plain read.
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
// Append a trailing '\n' (byte 10) — presumably the query output terminates the
// last record with the record separator while the raw read does not; confirm.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Uploads CSV with '.' separators, then queries with ',' so column ordinals fail per
// record, producing non-fatal InvalidColumnOrdinal errors routed to the error consumer.
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Note: 'base' is mutated in place here and again below via setColumnSeparator.
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
// blockLast() drains the output flux so all error callbacks are delivered.
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Verifies the progress consumer is invoked and ultimately reports the full file size.
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
// Total committed size; the final progress callback should report this value.
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
// Drain the query output so every progress callback fires.
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
/*StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)))
.expectError(IllegalArgumentException.class)
.verify();*/
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
// Runs a scenario once in playback/record mode, or up to 5 times against the live
// service, pausing 5s between attempts to ride out transient failures.
// Bug fix: the original swallowed the exception of the final failed attempt and
// returned normally, letting a permanently failing scenario pass the test; the
// last failure is now rethrown once all retries are exhausted.
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure;
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Records every bytes-scanned progress value reported during a query so tests can
// assert on progress reporting.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
// Progress snapshots in callback order; read directly by the tests.
// NOTE(review): not synchronized — assumes callbacks arrive serially; confirm.
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Asserts that every query error delivered is non-fatal and has the expected name,
// counting occurrences for the test to inspect.
static class MockErrorReceiver implements Consumer<FileQueryError> {
// Expected error name, e.g. "InvalidColumnOrdinal".
String expectedType;
// Number of matching errors observed so far.
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
// Only non-fatal errors are expected to flow through this consumer.
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
    // Upload with overwrite enabled, then verify the payload round-trips.
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
}
class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
    // A client pointed at a never-created path must report that it does not exist.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .assertNext(exists -> assertFalse(exists))
        .verifyComplete();
}
@ParameterizedTest
// Bug fix: without nullValues the literal string "null" is passed instead of null,
// making the contentType-default branch below dead code; aligned with the sibling
// createMetadata test which already declares nullValues = "null".
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service reports application/octet-stream when no content type was set.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner/group supplied at create time are round-tripped by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// With no owner/group supplied, the service defaults both to $superuser.
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: createWithResponse returns a cold Mono; without block() the create
    // request was never sent and the test only exercised the path made in setup.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// HTTP headers supplied at create time are accepted (201); both the all-null and
// fully-populated rows must succeed.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied at create time must be present on the created path.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// 0777 permissions masked by 0057 must yield rwx-w---- on the created path.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Proposing a lease id together with a duration at create time succeeds (201).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id WITHOUT a lease duration is rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A 15s lease acquired at create time shows as LOCKED / LEASED / FIXED in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Absolute expiry (and no expiry) schedule-deletion options are accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// One absolute-expiry option and one null (no scheduled deletion).
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// Relative expiry: expiresOn must equal creation time plus the requested duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a fresh path creates it; existence check must return true.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options produce 201 and the standard response headers (ETag, Last-Modified...).
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Second createIfNotExists on the same path does not overwrite: 201 then 409.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// Path created via createIfNotExists must report as existing.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// HTTP headers set via createIfNotExists are reflected by getProperties; a null
// content type falls back to the service default application/octet-stream.
@ParameterizedTest
// BUG FIX: nullValues = "null" was missing, so the first row passed the literal
// string "null" for every header instead of a real null (all sibling tests set it).
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"},
    nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied to createIfNotExists must equal the path's reported metadata.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// createIfNotExists accepts octal permissions and umask (201).
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// ACL entries supplied to createIfNotExists are readable back; checks the first two.
// BUG FIX: the method carried two @Test and two @DisabledIf annotations; duplicate
// @Test does not compile (@Test is not repeatable). Keep the single pair used by
// the sibling ACL tests, which gate on service version 2020-12-06.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}
// Owner/group supplied to createIfNotExists are round-tripped by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Explicit null owner/group defaults both to $superuser on the service side.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Plain delete of an existing file returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After delete, getProperties must fail with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when all supplied request conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any supplied request condition is not met.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file reports true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExistsWithResponse on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After a successful deleteIfExists, getProperties must fail.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on the same path returns 404 instead of throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when all supplied request conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any supplied request condition is not met.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Full sweep over the default property set of a freshly created, never-copied,
// never-leased file: standard headers present, copy/lease/encryption fields unset,
// HOT tier, empty metadata, not a directory.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all supplied request conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when any supplied request condition is not met.
// NOTE(review): unlike the other *ACFail tests, this one passes the return value of
// setupPathLeaseCondition as the lease id instead of the raw leaseID parameter —
// confirm whether that is intentional for this operation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Setting headers that copy the existing values except contentType must update
// only the content type.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Upload default data, overwrite all HTTP headers, then verify each one via
// getProperties (including the MD5 of the uploaded content).
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUG FIX: append/flush return cold Monos; without block() no data was ever
    // uploaded, so the MD5 row validated headers against an empty file.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// One all-null row and one fully-populated row (MD5 is Base64 of the default data digest).
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting HTTP headers on a path that was never created surfaces a DataLakeStorageException.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// Minimal set-metadata round trip: write one key/value pair and read it back via getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Set-metadata with zero or two pairs; null CSV cells mean "omit this pair" so the
// first row exercises the empty-metadata case.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Set-metadata succeeds (HTTP 200) when every supplied access condition matches the path.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// Set-metadata fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Set-metadata on a path that was never created surfaces a DataLakeStorageException.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// Read with all-default options: verifies the returned bytes match what was uploaded
// and audits the full set of response headers — the ones a plain blob read would set
// but a fresh, unleased, uncopied file should not, plus the ones that must be present.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
// No metadata was set, so no x-ms-meta-* headers should come back.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
// The file was never the target of a copy, so all copy-related headers are absent.
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
// No lease was taken: available/unlocked with no duration.
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
}
// Reading a zero-length file completes with an empty buffer rather than erroring.
// NOTE(review): uses the hard-coded path "emptyFile" instead of generatePathName() —
// confirm this cannot collide when tests run concurrently against the same file system.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// A ranged read that is interrupted mid-stream retries with the remaining sub-range;
// MockRetryRangeResponsePolicy asserts the retry carries "bytes=2-6" and the injected
// failures eventually surface as an IOException to the caller.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.verifyError(IOException.class);
}
// Minimal read round trip: upload the default data and read it back in full.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
/**
 * Ranged reads return exactly the requested slice of the uploaded data.
 * A null {@code count} means "read from {@code offset} to the end of the file".
 */
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Fix: removed the unused local ByteArrayOutputStream — the downloaded bytes are
    // collected reactively and the stream was never written to.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
/**
 * Argument source for {@code readRange}: (offset, count, expected substring).
 * A null count means "read to end of file".
 */
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// Read succeeds (HTTP 200) when every supplied access condition matches the path.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// Read fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting an MD5 on a ranged read returns the Content-MD5 header computed over
// exactly the requested range (first 3 bytes), not the whole file.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
// MD5 is a required JDK algorithm; rethrow unchecked to fail the assertion lambda.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
/**
 * Reads through a client whose pipeline injects 5 transient failures; the default
 * download retry behavior should still deliver the complete, correct payload.
 */
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // Fix: removed the unused local ByteArrayOutputStream — bytes are collected from
    // the Flux directly and the stream was never written to.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
/**
 * readToFile without the overwrite flag must fail with {@link FileAlreadyExistsException}
 * (wrapped in an UncheckedIOException) when the destination file already exists on disk.
 */
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // Fix: append/flush return Mono and were previously never subscribed (missing
    // .block()), so no content was ever uploaded. Blocking matches the sibling tests
    // (e.g. downloadFileExistsSucceeds) and makes the setup actually take effect.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true succeeds even when the destination already exists,
// and the file on disk ends up holding the uploaded content.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFile creates the destination when it does not exist and writes the uploaded content.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFileWithResponse honors caller-supplied OpenOptions (CREATE/READ/WRITE).
// NOTE(review): despite the name, the setup *creates* the destination file when absent
// (no TRUNCATE_EXISTING in the option set) — confirm the intended precondition.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
// Rethrow unchecked so the assertion lambda fails the StepVerifier step.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFileWithResponse with CREATE + TRUNCATE_EXISTING overwrites an existing
// destination file with the downloaded content.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
// Rethrow unchecked so the assertion lambda fails the StepVerifier step.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Downloads files of various sizes with a 4 MiB block size and verifies the byte count
// and on-disk content match the uploaded source file.
// NOTE(review): the @EnabledIf condition string appears truncated in this view
// (unterminated literal) — confirm against the full file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
/**
 * File sizes for the download tests: tiny, block-aligned (16 MiB), a deliberately
 * misaligned size (8 * 1026 KiB + 10), and a large 50 MB payload.
 */
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
// Same as downloadFile but through a freshly built async service client and file system,
// exercising the buffered-copy download path end to end.
// NOTE(review): the @EnabledIf condition string appears truncated in this view
// (unterminated literal) — confirm against the full file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged file downloads: only the requested [offset, offset+count) slice of the
// source should be written to the destination file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
/**
 * Ranges for {@code downloadFileRange}: the full file, all but the first byte,
 * a small mid-file slice, all but the last byte, and a count extending past EOF.
 */
private static Stream<FileRange> downloadFileRangeSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, size),
        new FileRange(1, size - 1),
        new FileRange(3, 2L),
        new FileRange(0, size - 1),
        new FileRange(0, 10 * 1024L));
}
// A download range whose offset starts past the end of the file fails with a
// DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with offset 0 and no count downloads the entire file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// File download succeeds when every supplied access condition matches the path.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// File download fails with ConditionNotMet or LeaseIdMismatchWithBlobOperation when
// an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// ETag-locked download: a pipeline policy overwrites the file after the first response,
// so a later chunk's etag no longer matches and the transfer must abort with HTTP 412
// and clean up the partially written destination file.
// NOTE(review): the @EnabledIf condition string appears truncated in this view
// (unterminated literal) — confirm against the full file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the first response comes back, overwrite the remote file so subsequent
// ranged requests see a different etag.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple ranged requests so the etag change is observed.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
// Concurrent chunk failures may be aggregated; any 412 in the composite suffices.
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give the async cleanup a moment, then verify the partial file was deleted.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
// Progress reporting via the deprecated ProgressReceiver: the reported totals must
// reach exactly fileSize, never exceed it, and increase monotonically.
// NOTE(review): the @EnabledIf condition string appears truncated in this view
// (unterminated literal) — confirm against the full file.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported
// cumulative byte count so tests can assert on the sequence afterwards.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// Progress reporting via the current ProgressListener API: same monotonicity and
// upper-bound assertions as the ProgressReceiver variant.
// NOTE(review): the @EnabledIf condition string appears truncated in this view
// (unterminated literal) — confirm against the full file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every reported cumulative progress value.
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename within the same file system returns HTTP 201.
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
// After a rename the returned client points at a live destination path, and the
// original client's path no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Rename across file systems: the destination in a newly created file system is live
// and the source path is gone.
// NOTE(review): newFileSystem comes from .block() without a null check — confirm
// whether a blockOptional().orElseThrow(...) guard (as in downloadFileAsyncBufferCopy)
// is preferred here.
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming a path that was never created surfaces a DataLakeStorageException.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename handles percent-encoded characters ("%20%25") in source and/or destination
// path names; empty CSV cells mean no suffix for that side.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
// Rename succeeds (HTTP 201) when source access conditions match the source path.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when a source access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename over an existing destination succeeds when destination access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename over an existing destination fails when a destination access condition
// does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename works when the client is authenticated with a file-system SAS that grants
// read/move/write/create/add/delete permissions.
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken but the SAS is passed with a leading '?', which the client
// must tolerate.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Minimal append smoke test: appending the default data at offset 0 does not throw.
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// Append returns HTTP 202 with request-id, version, date, and server-encrypted headers.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a correct transactional MD5 of the payload succeeds with HTTP 202.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Invalid append inputs fail client-side: null body -> NPE; declared length that
// disagrees with the actual Flux length -> UnexpectedLengthException.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
/**
 * Argument source for {@code appendDataIllegalArguments}: a null body (NPE) and
 * declared lengths one byte over/under the real payload size (UnexpectedLengthException).
 */
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        Arguments.of(null, size, NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), size + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), size - 1, UnexpectedLengthException.class));
}
// Appending a zero-length body is rejected by the service with a DataLakeStorageException.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// A null body is rejected client-side with a NullPointerException.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// Append succeeds (HTTP 202) when the correct active lease id is supplied.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a wrong lease id on a leased path fails with HTTP 412.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard: lease-action support on append requires service version 2020-08-04+.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// Append with LeaseAction.ACQUIRE atomically takes a 15s fixed lease on the path;
// verified via the lease status/state/duration reported by getProperties.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW on append should renew an existing lease; the file stays leased.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE (with flush=true) on append should release the lease as part of
// the operation, leaving the file unlocked/available.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE acquires a lease for the duration of the append and
// releases it on completion; the file must end up unlocked.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created must fail with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// With a pipeline policy that injects transient failures, the client's retry logic
// should still land the full payload; verified by reading the flushed file back.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// setFlush(true) on append should commit the data in the same call; the content must be
// readable immediately without a separate flush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Minimal BinaryData append overload smoke test.
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData append returns 202 with the standard request/encryption headers populated.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// BinaryData append with flush=true returns 202 with standard headers populated.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Minimal flush smoke test: append then flush with overwrite=true.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (and retainUncommittedData=false) completes without error.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Flush with retainUncommittedData=true completes without error.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing with a position (4) that does not match the appended length must fail.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time must be reflected in the path's properties.
// A null content type falls back to the service default "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all supplied access conditions (lease, ETag match, dates) hold.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush must fail when any supplied access condition is violated.
// Consistency fix: use getDefaultDataSizeLong() like the sibling flushAC test
// (flushWithResponse takes a long position; the int getter relied on implicit widening).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created must fail.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false onto already-committed data must fail.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Path names (including URL-encoded and non-ASCII forms) must round-trip through the
// client builder into the expected decoded file path.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Bearer-token credentials over plain HTTP must be rejected by the builder.
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
/**
 * Sizes for {@code uploadFromFile}: small, sub-MB, multi-block default, and an explicit
 * 4 MiB block size for the large case.
 */
private static Stream<Arguments> uploadFromFileSupplier() {
    long fourMib = 4L * 1024 * 1024;
    return Stream.of(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, fourMib));
}
// Metadata supplied to uploadFromFile must be stored on the path, and the uploaded
// bytes must match the local file exactly.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Uploading onto an existing file without overwrite must fail for both clients.
// Fix: the second random file was created inline and never registered for cleanup,
// leaking a temp file on disk; it is now tracked like the first one.
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
// Uploading onto an existing file with overwrite=true must succeed for both clients.
// Fix: the second random file was created inline and never registered for cleanup,
// leaking a temp file on disk; it is now tracked like the first one.
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
File secondFile = getRandomFile(50);
secondFile.deleteOnExit();
createdFiles.add(secondFile);
StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
.verifyComplete();
}
/*
 * Records the cumulative number of bytes sent during uploadFromFile. Unlike the
 * count-based reporters, this tracks the running byte total, because upload-from-file
 * hooks into the disk-read stream which uses a hard-coded read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        // Progress is cumulative, so keep only the latest total.
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
/** ProgressListener counterpart of {@code FileUploadReporter}: records the latest cumulative byte total. */
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        // Progress is cumulative, so keep only the latest total.
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
/**
 * (size, blockSize, bufferCount) combinations exercising single-block, many-block,
 * few-buffer, and many-small-block progress reporting.
 */
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    Arguments singleBlock = Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8);
    Arguments manyBlocks = Arguments.of(20 * Constants.MB, (long) Constants.MB, 5);
    Arguments fewBuffers = Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2);
    Arguments smallBlocks = Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100);
    return Stream.of(singleBlock, manyBlocks, fewBuffers, smallBlocks);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// uploadFromFile honoring single-upload-size/block-size transfer options; the resulting
// remote file size must equal the local data size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
/** Cases forcing the chunked-upload path: default block size, and an explicit small block size. */
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Arguments defaultBlockSize = Arguments.of(100, 50L, null);
    Arguments explicitBlockSize = Arguments.of(100, 50L, 20L);
    return Stream.of(defaultBlockSize, explicitBlockSize);
}
// uploadFromFileWithResponse returns 200 with ETag/last-modified populated, and the
// remote file size matches the local data size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
/**
 * Buffer triples with an empty buffer in each possible position (and none), paired with
 * the bytes expected after upload skips the empty ones.
 */
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
/** (dataSize, bufferSize, numBuffs) combinations for the buffered-upload round-trip. */
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    long oneMb = Constants.MB;
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5 * oneMb, 2),
        Arguments.of(35 * Constants.MB, 5 * oneMb, 5),
        Arguments.of(100 * Constants.MB, 10 * oneMb, 2),
        Arguments.of(100 * Constants.MB, 10 * oneMb, 5),
        Arguments.of(10 * Constants.MB, oneMb, 10),
        Arguments.of(50 * Constants.MB, 10 * oneMb, 2),
        Arguments.of(10 * Constants.MB, 2 * oneMb, 4),
        Arguments.of(10 * Constants.MB, 3 * oneMb, 3));
}
/**
 * Asserts that {@code result} is exactly the concatenation of {@code buffers}: the result's
 * limit is windowed to each expected chunk in turn and the chunks are compared in order.
 */
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer expected : buffers) {
        expected.position(0);
        // Restrict the comparison window to exactly this chunk's length.
        result.limit(result.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, result);
        result.position(result.position() + expected.remaining());
    }
    // Nothing may remain once every expected chunk has been consumed.
    assertEquals(0, result.remaining());
}
/** Progress receiver that counts callbacks, asserting each cumulative total is block-aligned. */
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
/** ProgressListener counterpart of {@code Reporter}: counts block-aligned progress callbacks. */
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
/** (size, blockSize, bufferCount) combinations for progress-reporting buffered uploads. */
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Arguments singleBlock = Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8);
    Arguments manyBlocks = Arguments.of(20 * Constants.MB, (long) Constants.MB, 5);
    Arguments fewBuffers = Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2);
    Arguments smallBlocks = Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20);
    return Stream.of(singleBlock, manyBlocks, fewBuffers, smallBlocks);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/**
 * (chunk sizes in MiB, bufferSize, numBuffers): chunks smaller than, equal to, and larger
 * than the block size.
 */
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Stream.of(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/** Chunk-size lists straddling the 4 MiB single-upload threshold (below, just above, at, exactly). */
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<Integer> belowThreshold = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoFullBlocks = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    List<Integer> exactlyThreshold = Collections.singletonList(4 * Constants.MB);
    return Stream.of(belowThreshold, justOverThreshold, twoFullBlocks, exactlyThreshold);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/** Chunk-size lists for the transient-failure hot-flux test (below and above the 4 MiB threshold). */
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<Integer> belowThreshold = Arrays.asList(10, 100, 1000, 10000);
    List<Integer> justOverThreshold = Arrays.asList(4 * Constants.MB + 1, 10);
    List<Integer> twoFullBlocks = Arrays.asList(4 * Constants.MB, 4 * Constants.MB);
    return Stream.of(belowThreshold, justOverThreshold, twoFullBlocks);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null source Flux must fail client-side with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
return Stream.of(
Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
// Upload to the same path twice with overwrite=true; the second call must succeed.
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
// First upload must target the client under test (was fc, which wrote to an unrelated path so the
// second call never exercised the overwrite branch) and must be subscribed — without block() the
// returned Mono was never executed at all.
assertDoesNotThrow(() -> fac.uploadFromFile(file.toPath().toString(), true).block());
// Track the second temp file for cleanup instead of leaking it.
File overwriteFile = getRandomFile(50);
overwriteFile.deleteOnExit();
createdFiles.add(overwriteFile);
StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
.verifyComplete();
}
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
// Round-trips a file through upload/readToFile using a non-markable (channel-backed) source.
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
// Close the channel once the blocking upload finishes; the original leaked it.
AsynchronousFileChannel channel = AsynchronousFileChannel.open(file.toPath());
try {
fc.upload(FluxUtil.readFile(channel, 0, file.length()), null, true).block();
} finally {
channel.close();
}
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Upload without an explicit length must still write the full default payload.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// A declared length that does not match the stream (zero, negative, off-by-one) must fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
// Invalid declared lengths: zero, negative, and one byte off in each direction.
long actualLength = DATA.getDefaultDataSizeLong();
return Stream.of(0L, -100L, actualLength - 1, actualLength + 1);
}
// A transient failure injected into the pipeline must be retried transparently by the client.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// BinaryData-based upload round-trips the default payload.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// BinaryData upload with overwrite=true replaces existing content.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// An encryption context set at upload must be echoed back by getProperties (service 2021-04-10+).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
// Builds a small CSV payload (optionally prefixed by a header row), repeats the two data rows
// numCopies times, and writes the result to fc via create/append/flush.
String colSep = Character.toString(s.getColumnSeparator());
byte[] headerBytes = ("rn1" + colSep + "rn2" + colSep + "rn3" + colSep + "rn4"
+ s.getRecordSeparator()).getBytes();
byte[] rowBytes = ("100" + colSep + "200" + colSep + "300" + colSep + "400"
+ s.getRecordSeparator() + "300" + colSep + "400" + colSep + "500" + colSep
+ "600" + s.getRecordSeparator()).getBytes();
int headerLength = s.isHeadersPresent() ? headerBytes.length : 0;
byte[] data = new byte[headerLength + rowBytes.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headerBytes, 0, data, 0, headerBytes.length);
}
for (int copy = 0; copy < numCopies; copy++) {
System.arraycopy(rowBytes, 0, data, headerLength + copy * rowBytes.length, rowBytes.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
private void uploadSmallJson(int numCopies) {
// Writes a tiny JSON object with numCopies "nameN": "ownerN" entries to fc.
StringBuilder json = new StringBuilder("{\n");
for (int copy = 0; copy < numCopies; copy++) {
json.append(String.format("\t\"name%d\": \"owner%d\",\n", copy, copy));
}
json.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(json.toString()), 0).block();
fc.flush(json.length(), true).block();
}
// SELECT * query output must equal a direct read of the file, across payload sizes.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
// Fold the query result pieces into one buffer for comparison against the direct read.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Query output must match direct read across record/column separators; when headers are present
// on input but not output, the 16-byte header row is stripped from the result.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Separator matrix for queryCsvSerializationSeparator: varied record/column separators and header flags.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Non-null escape char and field quote must not alter a pass-through query's output.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// JSON-in / JSON-out query must return the uploaded document (plus trailing record separator).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
// Append a trailing newline (byte 10) so the direct read matches the query output format.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
// JSON entry counts to query, all newline-record-separated.
return Stream.of(0, 10, 100, 1000).map(copies -> Arguments.of(copies, '\n'));
}
// CSV in, JSON out: the first data row must serialize to the expected JSON object.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// Only the leading expectedData.length bytes are compared (trailing rows ignored).
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
// JSON in, CSV out: a two-entry JSON object flattens to a single CSV row.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
// CSV in, Arrow out: only verifies the call succeeds (Arrow bytes are not compared).
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// A mismatched column separator yields non-fatal per-record errors routed to the error consumer.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Data is uploaded with '.' separators but queried with ',' separators below.
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// Parsing CSV content as JSON is a fatal error: consuming the result stream must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// The progress consumer must eventually report the full file size as bytes scanned.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
// For a large payload, progress reports must be monotonically non-decreasing.
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
// An unrecognized serialization type on either side must be rejected with IllegalArgumentException.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
// Arrow serialization is output-only: using it as input must fail.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// True when the targeted service version predates 2020-10-02 (parquet support gate).
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Parquet serialization is input-only: using it as output must fail.
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Querying a path that was never created must fail with DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
// Query succeeds when all request conditions (lease / match / modified-since) are satisfied.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the scenario once in playback mode, or with up to 5 attempts in live mode to absorb
 * transient service flakiness. Unlike the previous version, exhausting all retries rethrows the
 * last failure instead of returning normally and letting a broken test pass silently.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
int attempts = 0;
while (true) {
try {
runnable.run();
return;
} catch (RuntimeException ex) {
// Runnable.run can only throw unchecked exceptions; surface the last one when out of retries.
if (++attempts >= 5) {
throw ex;
}
sleepIfRunningAgainstService(5000);
}
}
}
// Query must fail with DataLakeStorageException when a request condition is violated.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
// Scheduling deletion sets (or leaves unset) the expiry time reported by getProperties.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName())
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Deletion option variants: relative to creation time, relative to now, empty, and null.
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// True when the targeted service version predates 2019-12-12 (query/expiry support gate).
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// An absolute expiry time must round-trip (truncated to whole seconds) through getProperties.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Scheduling deletion on a file that does not exist must fail.
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Collects bytes-scanned progress reports from query operations.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
// Progress callbacks may arrive on reactor worker threads — NOTE(review): presumed from the async
// client; confirm. Use a synchronized list and make the field final to prevent reassignment.
final List<Long> progressList = Collections.synchronizedList(new ArrayList<>());
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Counts non-fatal query errors, asserting each one is non-fatal and of the expected error name.
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
// Intentionally unrecognized serialization implementation, used to exercise rejection paths.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// Upload without overwrite must fail when the file already exists.
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
// Upload with overwrite=true replaces the content; read back and compare bytes.
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
// A 20 MB stream with a 1 MB single-upload cap forces the chunked upload path.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
// Stream-based variant of bufferedUploadOptions: counts service appends per size/block combination.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
// Anonymous subclass intercepts appendWithResponse to count service append calls.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Size/block combinations with the append count each should produce.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
// The upload response must carry an ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call pipeline policy overriding x-ms-version must be reflected in response headers.
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} |
I wouldn't recommend making a subscribe call like this in `assertNext`. Instead validate the headers in a flatMap call that creates the array. ```java StepVerifier.create(fc.readWithResponse(null, null, null, false) .flatMap(r -> { // Verify headers here return FluxUtil.collectBytesInByteBufferStream(r.getValue()); }) .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes)) .verifyComplete(); ``` | public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
} | }); | public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// setPermissions returns path info with a populated ETag and last-modified time.
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
// WithResponse variant reports HTTP 200 on success.
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setPermissions succeeds when the supplied request conditions match the path.
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setPermissions fails when the request conditions are violated.
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
// setPermissions against a path that was never created surfaces a service error.
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// setAccessControlList returns path info with ETag and last-modified populated.
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
// WithResponse variant reports HTTP 200 on success.
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setAccessControlList succeeds when the supplied request conditions match the path.
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setAccessControlList fails when the request conditions are violated.
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
// setAccessControlList against a never-created path surfaces a service error.
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Condition method for @DisabledIf: true when the targeted service version
// predates 2020-02-10 (the version that introduced recursive ACL operations).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive set on a single file: exactly one file changed, no directories, no failures.
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive update on a single file: same counter expectations as setACLRecursive.
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive remove parses a POSIX-style removal list (mask, default entries, named user/group).
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
// getAccessControl returns a result with ACL, permissions, owner and group all populated.
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
@Test
// WithResponse variant (UPN resolution off) reports HTTP 200.
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
@Test
// Requesting user-principal-name resolution also succeeds with HTTP 200.
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// getAccessControl succeeds when the supplied request conditions match the path.
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// getAccessControl fails when a request condition is violated. The garbage-lease
// case is skipped: presumably the getAccessControl endpoint does not evaluate
// lease conditions, so that row cannot produce a failure here.
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// Exhaustively checks the default property set of a freshly created file:
// standard headers present, copy/lease/encryption-related fields in their
// initial (mostly null/unset) states.
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
@Test
// Minimal getPropertiesWithResponse call reports HTTP 200.
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// getProperties succeeds when the supplied request conditions match the path.
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// getProperties fails when a request condition is violated.
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
// getProperties on a never-created path fails with a BlobNotFound service error.
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
@Test
// Passing null headers is a valid no-op update: 200 with standard response headers.
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
// Round-trips the existing headers while changing only content type, then verifies
// the change took effect. Blocking is acceptable here since this is test code.
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
// Uploads default content, applies the parameterized HTTP headers, and verifies
// they are all reflected by getProperties.
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUG FIX: append/flush return cold Monos; without subscribing (block()) the
    // upload never happened. Sibling tests (e.g. readMin) block on these calls.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Argument rows for setHTTPHeadersHeaders: all-null headers and a fully-populated
// header set (with a valid base64 MD5 of the default test content).
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setHttpHeaders succeeds when the supplied request conditions match the path.
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setHttpHeaders fails when a request condition is violated.
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
// setHttpHeaders on a never-created path surfaces a service error.
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
@Test
// Sets one metadata pair and verifies it round-trips through getProperties.
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
// Sets zero, one or two metadata pairs; the null rows exercise the empty-map path.
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setMetadata succeeds when the supplied request conditions match the path.
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setMetadata fails when a request condition is violated.
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
// setMetadata on a never-created path surfaces a service error.
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// BUG FIX: the annotation was duplicated (@Test twice), which does not compile
// because @Test is not a repeatable annotation; one copy removed.
@Test
// Reading a freshly created, empty file yields a single empty buffer.
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
@Test
// Uses MockRetryRangeResponsePolicy to assert the retry request carries the
// expected "bytes=2-6" range header; consuming the body then fails with IOException
// because the mock forces the retry path.
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
}
@Test
// Upload the default content and read it back, asserting a byte-for-byte match.
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("readRangeSupplier")
// Reads a byte range and asserts the accumulated content matches the expected slice.
// NOTE(review): the inner subscribe() asserts asynchronously; a failure there may be
// reported on a background thread rather than failing the StepVerifier — worth confirming.
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Range rows: whole content (open-ended), a prefix, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// read succeeds (200) when the supplied request conditions match the path.
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// read fails when a request condition is violated.
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
@Test
// Requests an MD5 of the first 3 bytes and checks the Content-MD5 response header
// equals the locally computed base64 MD5 of the same substring.
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// MockFailureResponsePolicy injects 5 transient failures; the default download
// retry behavior must still deliver the full content.
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
@Test
// readToFile without overwrite must fail (UncheckedIOException wrapping
// FileAlreadyExistsException) when the destination file already exists.
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // BUG FIX: append/flush return cold Monos; the original never subscribed, so
    // the upload did not happen (sibling tests block on the same calls).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
// readToFile with overwrite=true succeeds even though the destination already exists.
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// readToFile creates the destination file when it does not exist.
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// Explicit OpenOptions (CREATE/READ/WRITE) let readToFileWithResponse write into
// an existing (empty) file.
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// TRUNCATE_EXISTING in the OpenOptions allows overwriting an existing destination file.
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
// Downloads only the requested range into a file and compares it to the
// corresponding slice of the source file.
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges: full content, all-but-first byte, interior slice, all-but-last byte,
// and a count larger than the content (clamped by the service).
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
@Test
// A range starting past the end of the content is rejected by the service.
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A range with no count (offset 0, open-ended) downloads the whole file.
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// readToFileWithResponse succeeds when the supplied request conditions match the path.
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// readToFileWithResponse fails with ConditionNotMet or LeaseIdMismatch... when a
// request condition is violated.
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
@SuppressWarnings("deprecation")
// Test-only ProgressReceiver that records every reported cumulative byte count so tests
// can assert on the full progress sequence after a transfer completes.
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test-only ProgressListener that records every reported cumulative progress value for
// later assertions (non-deprecated counterpart of MockReceiver).
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
@Test
// Smoke test: renaming a file to a new path in the same file system returns 201 Created.
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
@Test
// After a rename, the returned client must resolve the new path (properties -> 200) and
// the original client's path must no longer exist (getProperties errors).
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
// Rename across file systems: destination client works, source path is gone afterwards.
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
// Renaming a path that was never created fails with a storage exception.
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    // Verifies rename handles percent-encoded characters in both source and destination
    // path suffixes (null CSV cells mean "no suffix").
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();

    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
        null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // FIX: the original built the properties Mono via flatMap but never subscribed
            // to it (and returned null from the lambda), so the 200 assertion never ran.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename succeeds (201) when every satisfied access condition is applied to the source.
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename fails when any unsatisfied access condition is applied to the source.
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename onto an existing destination succeeds (201) when the destination's access
// conditions are satisfied.
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename onto an existing destination fails when the destination's access conditions
// are not satisfied.
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A client authenticated with a file-system SAS (move permission included) can rename,
// and the resulting destination client can read properties.
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
// Same as renameSasToken but the SAS string carries a leading '?', which the client must
// tolerate when parsing credentials.
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
// Smoke test: a basic append of the default payload at offset 0 does not throw.
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// append returns 202 Accepted with the standard request/version/date headers and the
// server-encryption flag set.
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
// append with a client-computed MD5 of the payload is accepted (202) — the service
// validates the transactional content hash.
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
// A null body or a declared length that disagrees with the actual data must error
// client-side with the given exception type.
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases for appendDataIllegalArguments: null body -> NPE; declared length off by +/-1
// from the actual payload -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
@Test
// Appending a zero-length body is rejected by the service.
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
// A null Flux body fails fast with NullPointerException before any network call.
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
// append succeeds (202) when the correct active lease id is supplied.
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
@Test
// append with a wrong lease id against a leased file fails with 412 Precondition Failed.
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard: true when the targeted service version predates 2020-08-04
// (the version that introduced lease actions on append).
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE during append takes a 15-second fixed lease on the file.
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.AUTO_RENEW during append keeps an existing lease alive; the file stays leased.
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
// LeaseAction.RELEASE (with flush=true) drops an existing lease as part of the append;
// the file ends up unlocked and available.
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE_RELEASE holds a lease only for the duration of the append+flush,
// leaving the file unlocked afterwards.
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
// Appending to a path that was never created fails with 404 Not Found.
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
// With a policy that injects transient failures, append must retry transparently and the
// flushed file must still contain the exact payload.
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// append with flush=true commits the data in one call: 202 response plus the data being
// immediately readable.
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
// Smoke test for the BinaryData append overload.
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// BinaryData append overload returns 202 with the standard headers.
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// BinaryData append overload with flush=true returns 202 with the standard headers.
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
// Smoke test: flushing previously appended data with overwrite=true does not throw.
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
@Test
// flush with close=true succeeds on a freshly created file with appended data.
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
// flush with retainUncommittedData=true succeeds.
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
// Flushing at a position (4) that does not match the appended data length is rejected
// by the service.
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
// HTTP headers supplied at flush time are persisted on the path; an absent content type
// defaults to application/octet-stream.
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// flush succeeds (200) when all satisfied access conditions are supplied.
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// flush fails when any unsatisfied access condition is supplied.
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency fix: siblings (flushAC, flushDataMin, ...) pass the long-valued size;
    // this test alone used the int getDefaultDataSize().
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
// Flushing a path that was never created fails with a storage exception.
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
@Test
// A second flush with overwrite=false onto already-committed data must fail.
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
// The builder must decode percent-encoded path segments so that differently encoded
// spellings of the same path resolve to the same file path.
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient pathClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    String resolvedPath = pathClient.getFilePath();
    assertEquals(finalFileName, resolvedPath);
}
@Test
// Building a client with a bearer-token credential over plain http must be rejected
// (bearer tokens require TLS).
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// (fileSize, blockSize) cases for uploadFromFile; null blockSize exercises the default
// chunking, the last case forces many 4 MB blocks.
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
@Test
// Metadata supplied to uploadFromFile is persisted and the content round-trips intact.
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// The no-argument uploadFromFile overload must not overwrite an existing file — both
// pre-existing targets reject the upload.
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
@Test
// uploadFromFile with overwrite=true succeeds against an existing file.
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
 * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
 * read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Latest cumulative byte count reported; reports are cumulative, so only the last matters.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Non-deprecated counterpart of FileUploadReporter: captures the latest cumulative byte
// count from the ProgressListener callback.
private static final class FileUploadListener implements ProgressListener {
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// (size, blockSize, bufferCount) cases shared by the reporter and listener progress tests.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
return Stream.of(
Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
// uploadFromFile honors single-shot vs chunked transfer options; the resulting remote
// file size must equal the local size either way.
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// (dataSize, singleUploadSize, blockSize) cases: data larger than the single-upload
// threshold, with default and explicit block sizes.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
return Stream.of(
Arguments.of(100, 50L, null),
Arguments.of(100, 50L, 20L)
);
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    // uploadFromFileWithResponse returns 200 with ETag/Last-Modified populated, and the
    // remote size matches the local file.
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);

    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();

    // FIX: the final StepVerifier was never terminated (no verifyComplete()), so the
    // file-size assertion never executed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Cases for asyncBufferedUploadEmptyBuffers: an empty buffer placed at each position
// (none, middle-end, middle, start) with the expected concatenation result.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
return Stream.of(
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
// Parallel buffered upload across varying data sizes, block sizes, and concurrency levels,
// then (for payloads under 100 MB) a full read-back comparison.
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    // Small single-upload threshold forces the chunked (staged) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    // Skip the read-back for very large payloads to keep runtime bounded.
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
// Rows: dataSize, bufferSize (block size), numBuffs (max concurrency).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    );
}
// Asserts that 'result' is exactly the concatenation of 'buffers': walks the aggregate
// buffer slice-by-slice using its position/limit window, then checks nothing is left over.
// NOTE: the position/limit arithmetic is order-sensitive; preserve statement order.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer buffer : buffers) {
        buffer.position(0);
        // Limit the window of 'result' to the length of the current expected buffer.
        result.limit(result.position() + buffer.remaining());
        TestUtils.assertByteBuffersEqual(buffer, result);
        result.position(result.position() + buffer.remaining());
    }
    // All bytes of the aggregate must have been accounted for.
    assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
// Test ProgressReceiver that counts callbacks and asserts each reported byte count is a
// whole multiple of the block size. 'reportingCount' is read directly by the tests.
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;
    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// Test ProgressListener mirroring Reporter for the non-deprecated progress API:
// counts callbacks and asserts each value is block-size aligned.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;
    Listener(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// Verifies the deprecated ProgressReceiver is invoked at least once per uploaded block.
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // Retries may report extra progress, so only a lower bound is asserted.
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Rows: size, blockSize, bufferCount (shared by the reporter and listener tests).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// Same as bufferedUploadWithReporter but exercising the ProgressListener replacement API.
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // Lower bound only: retries can add extra progress callbacks.
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
// Uploads a source whose ByteBuffer chunk sizes do not align with the transfer block size,
// covering aggregation (chunks smaller than a block) and splitting (chunks larger).
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    // bufferSize here is in MB (scaled below), unlike other tests where it is raw bytes.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Rows: chunk sizes in MB, block size in MB, max concurrency.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Stream.of(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Exercises the upload path selection (single-shot vs. chunked) around the 4 MB
// single-upload threshold for a cold Flux source.
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Same as bufferedUploadHandlePathing but with a hot (publish/autoConnect) source,
// which cannot be re-subscribed or buffered for length probing.
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists in bytes: below, straddling, at, and exactly the 4 MB threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB), Collections.singletonList(4 * Constants.MB));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
// Hot-Flux upload through a pipeline that injects transient failures: the client must
// retry without losing or duplicating data. Read-back uses a clean (no-failure) client.
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists in bytes covering both upload paths under injected failures.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
// InputStream-sourced upload with injected transient failures; data sizes straddle the
// 2 MB single-upload threshold configured below.
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
@Test
// A null data Flux must be rejected with NullPointerException before any network call.
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
// Verifies HTTP headers set at upload time are persisted on the path, including an
// optional client-computed Content-MD5.
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        // A null content type defaults to application/octet-stream on the service side.
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// Rows: dataSize, cacheControl, contentDisposition, contentEncoding, contentLanguage,
// validateContentMD5, contentType. Small sizes use the single-shot path, 6 MB the chunked path.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Stream.of(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Verifies metadata supplied at upload time round-trips through getProperties,
// including the empty-metadata case.
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
// Counts the number of Append calls the upload issues for given single-upload/block-size
// settings by intercepting the package-private appendWithResponse override point.
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Hand-rolled spy: count appends, then delegate to the real implementation.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
@Test
// POSIX permissions and umask supplied via FileParallelUploadOptions should be accepted
// and the file created with the expected size.
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Upload succeeds when all supplied access conditions (lease, ETag, modified-since) match.
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // setupPath*Condition resolves placeholder values against the live path state.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Upload fails with HTTP 412 (precondition failed) when any access condition mismatches.
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // Note: here IfNoneMatch is resolved to the real ETag (guaranteeing a mismatch failure)
    // while IfMatch is passed through raw — the inverse of the success test.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
// A garbage lease id must fail the upload even when the buffer pool holds multiple
// in-flight buffers (regression guard for buffer-pool deadlocks on failure).
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// By default upload must not overwrite: a second upload to the same path errors.
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Uploading from a file with overwrite=true must succeed over existing content.
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
    // Track the second temp file for cleanup too; previously it was created inline and
    // never registered with deleteOnExit()/createdFiles, leaking a temp file per run.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
@Test
// Uploads from a non-markable (non-replayable) file channel source and verifies the
// round-tripped content matches the source file.
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    // try-with-resources closes the channel once the upload has consumed it; the
    // original opened the channel inline and leaked the file handle.
    try (AsynchronousFileChannel channel = AsynchronousFileChannel.open(file.toPath())) {
        Flux<ByteBuffer> stream = FluxUtil.readFile(channel, 0, file.length());
        fc.upload(stream, null, true).block();
    }
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
@Test
// An InputStream source without an explicit length must still upload correctly
// (the client buffers to discover the length).
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
// A declared length that disagrees with the stream's actual length must fail the upload.
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Bad lengths: zero, negative, one short of, and one past the real stream length.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
@Test
// An InputStream upload must survive injected transient failures by replaying the buffered data.
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
// Uploading a BinaryData source via options round-trips the default payload.
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
// BinaryData upload with overwrite=true must succeed over the existing file content.
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
// An encryption context set at upload time must be returned by getProperties.
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload ('numCopies' copies of a fixed two-row body, optional header row
// per the serialization settings) and writes it to 'fc' via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    byte[] headers = header.getBytes();
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    // Header bytes are prepended only when the serialization declares headers present.
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Writes a small JSON object with 'numCopies' "nameN": "ownerN" entries to 'fc'.
// Note: every entry (including the last) is followed by a trailing comma.
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    b.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(b.toString()), 0).block();
    fc.flush(b.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    32
})
// SELECT * over a headerless CSV must return the file content byte-for-byte.
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Collect the query result with collectBytesInByteBufferStream, which honors each
        // buffer's position/limit. The previous reduce over ByteBuffer.array() could copy
        // bytes outside the readable window (and would throw on non-array-backed buffers),
        // and dereferenced a possibly-null block() result.
        byte[] queryArray = FluxUtil.collectBytesInByteBufferStream(fc.query(expression)).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
// Exercises many record/column separator characters plus header-present combinations
// on both the input and output serializations.
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // When the input had headers but the output drops them, the result is the source
        // minus the 16-byte header row; otherwise the result equals the source exactly.
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Rows: recordSeparator, columnSeparator, headersPresentIn, headersPresentOut.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    return Stream.of(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    );
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV query with explicit escape char and field quote configured on both serializations.
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
// JSON-in/JSON-out SELECT * should return the source document plus a trailing record
// separator ('\n' == byte 10).
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    // Collect the raw file bytes with collectBytesInByteBufferStream, which honors each
    // buffer's position/limit; the previous reduce over ByteBuffer.array() could copy
    // bytes outside the readable window and dereferenced a possibly-null block() result.
    byte[] downloaded = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    // Expected query output is the source followed by one record separator byte.
    byte[] readArray = Arrays.copyOf(downloaded, downloaded.length + 1);
    readArray[downloaded.length] = 10;
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Rows: numCopies of the JSON entry, record separator character.
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    );
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV input transformed to JSON output: each CSV row becomes an object with _1.._4 keys.
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // Only the first record is checked; the prefix comparison ignores any trailing rows.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// JSON input transformed to CSV output: the two JSON values become one comma-joined row.
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV input with Arrow output serialization: only verifies the query call does not throw
// (the Arrow payload itself is not decoded here).
public void queryInputCsvOutputArrow() {
    FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(inSer, 32);
    List<FileQueryArrowField> schema = Collections.singletonList(
        new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
    FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// A non-fatal query error (invalid column ordinal from a separator mismatch) should be
// routed to the registered error consumer rather than failing the stream.
public void queryNonFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    // Upload with '.' separators, then query declaring ',' to provoke the error.
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        // NOTE(review): block() result is dereferenced without a null check — confirm the
        // response is always non-null here.
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver2)).block().getValue().blockLast());
        assertTrue(receiver2.numErrors > 0);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
// A serialization implementation unknown to the client must be rejected client-side
// with IllegalArgumentException, whether supplied as input or output.
// FIX: removed the commented-out StepVerifier variant (dead code) that duplicated
// the live assertion below.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
// Arrow is an output-only query format; supplying it as input must fail client-side.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
            .verifyError(IllegalArgumentException.class);
    });
}

// Gate for features introduced with service version 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}

// Parquet is an input-only query format; supplying it as output must fail client-side.
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
            .verifyError(IllegalArgumentException.class);
    });
}

// Querying a file that was never created surfaces a service error.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}

// Query succeeds when all supplied access conditions match the file's current state.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
/**
 * Runs the scenario once in playback/record mode; in live mode retries up to 5 times
 * to absorb transient service failures, sleeping 5s between attempts.
 *
 * <p>BUG FIX: the original swallowed the exception after the final attempt, so a
 * scenario that failed all 5 times still "passed". The last failure is now rethrown.
 * Note that JUnit assertion failures (AssertionError) were never caught by the
 * original {@code catch (Exception)} and still propagate immediately here.</p>
 *
 * @param runnable the test scenario to execute
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int attempt = 0; attempt < 5; attempt++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) { // Runnable cannot throw checked exceptions.
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure;
}
// Query must be rejected when any supplied access condition does not match.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    StepVerifier.create(fc.queryWithResponse(
        new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
        .verifyError(DataLakeStorageException.class);
}

// Scheduling deletion should set (or leave unset) the file's ExpiresOn property.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}

// Cases: relative-to-creation, relative-to-now, empty options, and null options.
private static Stream<Arguments> scheduleDeletionSupplier() {
    return Stream.of(
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false)
    );
}

// Gate for features introduced with service version 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}

// An absolute expiry time is honored (service truncates to whole seconds).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
    OffsetDateTime now = testResourceNamer.now();
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}

// Scheduling deletion on a file that does not exist fails with a service error.
@Test
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
/**
 * Records every bytes-scanned value reported by the query progress callback.
 * FIX: the collection field is now {@code final} (it is never reassigned).
 * NOTE(review): callbacks may arrive on reactor worker threads; current tests only
 * read the list after the query Flux completes — confirm before reusing concurrently.
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    final List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Asserts every reported query error is non-fatal and of the expected type,
 * counting occurrences for the test to inspect.
 * FIX: {@code expectedType} is now {@code final}; the redundant explicit
 * zero-initialization of {@code numErrors} (Java default) was removed.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    final String expectedType;
    int numErrors;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// A serialization type the client does not recognize; used to trigger
// IllegalArgumentException paths in queryInputOutputIA.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}

// upload without the overwrite flag must refuse to replace an existing file.
@Test
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}

// upload with overwrite=true replaces the file; read back verifies the bytes.
@Test
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Cases: (dataSize, singleUploadSize, blockSize, expected append count).
// Defaults: 100 MB single-shot threshold, 4 MB block size.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    // Appends needed to chunk (100 MB + 1) bytes into default 4 MB blocks.
    int chunkedAppends = (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB));
    return Stream.of(
        Arguments.of((100 * Constants.MB) - 1, null, null, 1),
        Arguments.of((100 * Constants.MB) + 1, null, null, chunkedAppends),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5));
}
// A successful upload must return path info carrying a non-null ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
    assertNotNull(fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
        .getValue().getETag());
}

// A per-call policy overriding the service version header must apply to every request.
@Test
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    // Both the blob-endpoint and dfs-endpoint calls should carry the pinned version.
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
}
class FileAsyncApiTests extends DataLakeTestBase {
// File client recreated fresh for each test in setup().
private DataLakeFileAsyncClient fc;
// Local temp files created by tests; deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwx r-x r-- permission set used by permission-related tests.
private static final PathPermissions PERMISSIONS = new PathPermissions()
    .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
    .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
    .setOther(new RolePermissions().setReadPermission(true));
// null here means "use service defaults" — not referenced in this visible chunk.
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
    PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");

// Creates a fresh file for each test.
@BeforeEach
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}

// Removes any local temp files a test created.
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
    createdFiles.forEach(File::delete);
}

// create() on a new path returns non-null path info.
@Test
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        .assertNext(r -> assertNotEquals(null, r))
        .verifyComplete();
}

// Default create returns 201 with standard headers.
@Test
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}

// An unmatched If-Match condition on create fails with a service error.
@Test
public void createError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
        .verifyError(DataLakeStorageException.class);
}

// create(overwrite=false) on an existing file must fail.
@Test
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}

// exists() is true for a created file.
@Test
public void exists() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}

// exists() is false for a path never created.
@Test
public void doesNotExist() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
// HTTP headers supplied on create must be reflected by getProperties; absent
// content type falls back to application/octet-stream.
// FIX: added nullValues = "null" — without it the first row passed the literal
// string "null" for every header, so the (contentType == null) default branch
// below was unreachable. Sibling tests in this file already use nullValues.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
// Metadata supplied on create must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc.createWithResponse(null, null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// Gate for features introduced with service version 2021-04-10.
private static boolean olderThan20210410ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2021_04_10);
}

// Encryption context set at create time must surface in properties, read headers,
// and path listings.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // First emission is the directory created above; the second is the file.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}

// Create succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}

// Each row exercises exactly one satisfiable access condition (or none).
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(null, null, null, null, null),
        Arguments.of(OLD_DATE, null, null, null, null),
        Arguments.of(null, NEW_DATE, null, null, null),
        Arguments.of(null, null, RECEIVED_ETAG, null, null),
        Arguments.of(null, null, null, GARBAGE_ETAG, null),
        Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
    );
}

// Create fails when any supplied access condition is unsatisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
        .verifyError(DataLakeStorageException.class);
}

// Each row exercises exactly one unsatisfiable access condition.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(NEW_DATE, null, null, null, null),
        Arguments.of(null, OLD_DATE, null, null, null),
        Arguments.of(null, null, GARBAGE_ETAG, null, null),
        Arguments.of(null, null, null, RECEIVED_ETAG, null),
        Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
    );
}

// Create accepts octal permissions and umask.
@Test
public void createPermissionsAndUmask() {
    assertAsyncResponseStatusCode(fc.createWithResponse(
        "0777", "0057", null, null, null), 201);
}

// Gate for features introduced with service version 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_12_06);
}

// An ACL supplied at create time must be returned by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            // Only the first two entries (user, group) are compared here.
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}

// Owner and group supplied at create time must be returned by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
    String ownerName = testResourceNamer.randomUuid();
    String groupName = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
    fc.createWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(ownerName, r.getOwner());
            assertEquals(groupName, r.getGroup());
        })
        .verifyComplete();
}
// Omitting owner and group must fall back to the service default "$superuser".
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: the original built the create Mono but never subscribed (missing
    // block()), so the request was never sent before the assertions ran.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// HTTP headers passed via DataLakePathCreateOptions are accepted (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}

// Metadata passed via options must be a subset-match of what getProperties returns.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}

// Permissions 0777 with umask 0057 must yield effective permissions rwx-w----.
@Test
public void createOptionsWithPermissionsAndUmask() {
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}

// A proposed lease id with a duration is accepted on create.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}

// A proposed lease id WITHOUT a duration is rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}

// A fixed 15s lease acquired at create time is reflected in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}

// Absolute expiry (or no deletion options) is accepted on create.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}

// NOTE(review): uses OffsetDateTime.now() rather than testResourceNamer.now();
// acceptable here since only the 201 status is asserted.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
    return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}

// Relative time-to-expire must resolve to creationTime + 6 days.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
    DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        // compareDatesWithPrecision presumably asserts internally — confirm.
        .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
        .verifyComplete();
}

// createIfNotExists on a fresh path creates the file.
@Test
public void createIfNotExistsMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}

// Default createIfNotExists returns 201 with standard headers.
@Test
public void createIfNotExistsDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}

// Second createIfNotExists on the same path returns 409 instead of overwriting.
@Test
public void createIfNotExistsOverwrite() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        201);
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        409);
}

// The file exists after createIfNotExists.
@Test
public void createIfNotExistsExists() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    assertTrue(fc.exists().block());
}
// Headers supplied via createIfNotExists must round-trip; absent content type
// falls back to application/octet-stream.
// FIX: added nullValues = "null" — without it the first row passed the literal
// string "null" for every header, making the (contentType == null) branch below
// unreachable. Sibling tests in this file already use nullValues.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied via createIfNotExists must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// createIfNotExists accepts octal permissions and umask.
@Test
public void createIfNotExistsPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
        .setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}

// Encryption context set via createIfNotExists must surface in properties,
// read headers, and path listings.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // First emission is the directory created above; the second is the file.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}

// An ACL supplied via createIfNotExists must be returned by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            // Only the first two entries (user, group) are compared here.
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}

// Owner and group supplied via createIfNotExists must be returned by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String ownerName = testResourceNamer.randomUuid();
    String groupName = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(ownerName, r.getOwner());
            assertEquals(groupName, r.getGroup());
        })
        .verifyComplete();
}

// Explicit null owner/group fall back to the service default "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}

// HTTP headers passed via createIfNotExists options are accepted (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

// Metadata passed via createIfNotExists options must subset-match getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createIfNotExistsWithResponse(options, null).block();
    // Effective permissions = permissions masked by umask: 0777 & ~0057 -> rwx-w----.
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}

// Requires service version 2020-12-06+.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = testResourceNamer.randomUuid();
    // A proposed lease id together with a duration is accepted at creation time.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    // Proposed lease id WITHOUT a lease duration is rejected by the service.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
    StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}

// Requires service version 2020-12-06+.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // A fixed-duration lease should be reported as locked and leased.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}

// Requires service version 2020-12-06+.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}

// Requires service version 2020-12-06+.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // Expiry time should land six days after creation (compared with test precision tolerance).
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void deleteMin() {
    // Deleting an existing file with no conditions returns 200.
    assertAsyncResponseStatusCode(fc.deleteWithResponse(
        null, null, null), 200);
}

@Test
public void deleteFileDoesNotExistAnymore() {
    fc.deleteWithResponse(null, null, null).block();
    // After deletion, getProperties must fail with 404 BlobNotFound.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when every supplied access condition matches the path's state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}

// Delete fails when any supplied access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // setIfNoneMatch receives the REAL etag here, which is what makes the call fail.
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.deleteWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
    // Deleting a file that exists should emit true and then complete.
    StepVerifier.create(fc.deleteIfExists())
        .assertNext(Assertions::assertTrue)
        .verifyComplete();
}
@Test
public void deleteIfExistsMin() {
    // File exists, so the conditional delete performs a real delete (200).
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}

@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    // Subsequent property fetch confirms the file is gone.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // First delete removes the file (200); second is a no-op reported as 404.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}

// Conditional delete succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}

// Conditional delete fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
    StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // Setting permissions returns updated path info (etag + last-modified).
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}

@Test
public void setPermissionsWithResponse() {
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}

// setPermissions succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}

// setPermissions fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void setPermissionsError() {
    // A client pointing at a path that was never created should fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // Setting the ACL returns updated path info (etag + last-modified).
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}

@Test
public void setACLWithResponse() {
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}

// setAccessControlList succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
        200);
}

// setAccessControlList fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void setACLError() {
    // A client pointing at a path that was never created should fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Guard for features introduced with service version 2020-02-10 (recursive ACL APIs).
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}

@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    // A file is a single leaf: 0 directories changed, 1 file changed, 0 failures.
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}

@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}

@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    // Removes mask, default entries, and named user/group entries in one call.
    List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
        "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
    StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@Test
public void getAccessControlMin() {
    // Every field of the returned access-control payload should be populated.
    StepVerifier.create(fc.getAccessControl())
        .expectNextMatches(acl -> acl.getAccessControlList() != null
            && acl.getPermissions() != null
            && acl.getOwner() != null
            && acl.getGroup() != null)
        .verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}

@Test
public void getAccessControlReturnUpn() {
    // userPrincipalName=true asks the service to resolve AAD object ids to UPNs.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}

// getAccessControl succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, drc, null), 200);
}

@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    // The garbage-lease case is skipped: this operation does not validate the lease id format.
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesDefault() {
    // Exhaustively checks the default property surface of a freshly created, un-leased,
    // un-copied, empty file: populated identity/time fields, absent content headers,
    // no copy state, no lease, HOT tier, no metadata, and not a directory.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}

@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}

// getProperties fails when any access condition does not match.
// NOTE(review): unlike sibling *ACFail tests, this one passes the RESULT of
// setupPathLeaseCondition to setLeaseId instead of the raw leaseID — confirm intended.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void getPropertiesError() {
    // A client pointing at a path that was never created fails with BlobNotFound.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Passing null headers is valid and clears/retains defaults; still a 200.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}

@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    // Rebuilds the current header set, changing only the content type, and
    // verifies the new content type is observed.
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Sets (or clears, when null) every HTTP header on the path and verifies round-trip.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUGFIX: append/flush return reactive types and were never subscribed, so the
    // payload was never actually uploaded; block() so the content exists before the
    // headers (including the content-dependent MD5 case) are applied and validated.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    // Each header should come back exactly as set (helper also handles the null case).
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Two cases: all headers cleared (nulls) and every header populated, including a
// real Base64-encoded MD5 of the default payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", md5, "type"));
}
// setHttpHeaders succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}

// setHttpHeaders fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void setHTTPHeadersError() {
    // A client pointing at a path that was never created should fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single key/value pair should round-trip through setMetadata/getProperties.
    Map<String, String> expected = new HashMap<>();
    expected.put("foo", "bar");
    fc.setMetadata(expected).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(expected, props.getMetadata()))
        .verifyComplete();
}
// Sets zero, one, or two metadata pairs and verifies they round-trip.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// setMetadata succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}

// setMetadata fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

@Test
public void setMetadataError() {
    // A client pointing at a path that was never created should fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// BUGFIX: the @Test annotation was duplicated; @Test is not a repeatable
// annotation, so the duplicate is a compile error. Keep a single annotation.
@Test
public void readEmptyFile() {
    // Reading a freshly created, never-written file yields a zero-length buffer.
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
@Test
public void readWithRetryRange() {
    // NOTE(review): MockRetryRangeResponsePolicy presumably forces retries and asserts the
    // retried request carries the expected "bytes=2-6" range header — confirm in the helper.
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // The mocked pipeline ultimately surfaces an IOException to the subscriber.
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}
@Test
public void readMin() {
    // Upload the default payload (append + flush), then verify read() returns those exact bytes.
    fc.append(DATA.getDefaultBinaryData(), 0)
        .then(fc.flush(DATA.getDefaultDataSizeLong(), true))
        .block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reads a sub-range of the uploaded payload and checks the exact bytes returned.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    // A null count means "from offset to the end of the file".
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // CLEANUP: removed an unused ByteArrayOutputStream local — bytes are collected reactively.
    // FIX: use an explicit charset instead of the platform default when encoding the
    // expected string for comparison.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(StandardCharsets.UTF_8), bytes))
        .verifyComplete();
}
// Cases: full read from 0, a 5-byte prefix, and a 2-byte mid-file slice.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// read succeeds when every access condition matches.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}

// read fails when any access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Requesting content validation (last arg true) on a ranged read should make the
    // service return a Content-MD5 header covering exactly the requested 3 bytes.
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                // Compare against a locally computed Base64-encoded MD5 of the same 3 bytes.
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void readRetryDefault() {
    // Upload the default payload, then read through a pipeline that fails the first
    // responses (MockFailureResponsePolicy(5)); the default download retry behavior
    // should recover transparently and still yield the complete payload.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // CLEANUP: removed an unused ByteArrayOutputStream local — bytes are collected reactively.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    // Pre-create the local destination so the non-overwriting download must fail.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // BUGFIX: append/flush return reactive types and were never subscribed, so the
    // remote file had no content; block() to actually perform the upload.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // readToFile without overwrite must refuse to clobber the existing local file.
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
public void downloadFileExistsSucceeds() throws IOException {
    // Pre-create the local destination; overwrite=true lets the download replace it.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    // Local file content must match the uploaded payload.
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}

@Test
public void downloadFileDoesNotExist() throws IOException {
    // Ensure the local destination is absent; the download should create it.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Explicit open options (CREATE without TRUNCATE) drive how the destination is opened.
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}

@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // TRUNCATE_EXISTING allows overwriting the pre-existing destination file.
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// Round-trip test: upload a random file of the given size, download it with a 4 MB block size,
// and verify the reported size and on-disk content match the original.
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
// Ensure the destination does not exist so readToFile's create path is exercised.
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
private static Stream<Integer> downloadFileSupplier() {
// File sizes under test: tiny, block-aligned, deliberately unaligned (1026 is intentional,
// not a typo for 1024), and large.
return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// Same round-trip as downloadFile, but through a freshly built async service client and a new
// file system, exercising the async buffer-copy download path end to end.
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
// Downloading an explicit byte range must produce exactly that slice of the uploaded file.
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
private static Stream<FileRange> downloadFileRangeSupplier() {
// Ranges: whole blob, skip-first-byte, small middle slice, all-but-last-byte,
// and a count extending past the end of the data.
long size = DATA.getDefaultDataSizeLong();
return Stream.of(
new FileRange(0, size),
new FileRange(1, size - 1),
new FileRange(3, 2L),
new FileRange(0, size - 1),
new FileRange(0, 10 * 1024L));
}
@Test
// Requesting a range whose offset is past the end of the file must fail with a storage error.
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
// Offset is one byte beyond the uploaded data, which the service rejects.
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A range with offset 0 and no count must download the entire file.
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Download must succeed when every supplied access condition (dates, ETags, lease) is satisfied.
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
// setupPath* helpers resolve sentinel values (e.g. RECEIVED) into real ETags/lease IDs.
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Download must fail with ConditionNotMet or a lease mismatch when an access condition is violated.
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
// noneMatch is resolved to the real ETag so the If-None-Match condition is guaranteed to fail.
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// ETag locking: if the blob is overwritten mid-download, subsequent chunk reads must fail with
// 412 (precondition failed) and the partially written local file must be removed.
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
// Separate client used to overwrite the blob while the download is in flight.
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the first response (which captures the ETag), overwrite the blob so the next
// ranged read sees a changed ETag.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple ranged requests so the ETag change is observed.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give the async cleanup a moment, then confirm the partial download was deleted.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// The deprecated ProgressReceiver must report monotonically increasing progress that reaches
// exactly the file size and never exceeds it.
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Records every progress callback so tests can assert on the full sequence.
final List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
this.progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Same contract as downloadFileProgressReceiver, but through the non-deprecated ProgressListener.
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
private static final class MockProgressListener implements ProgressListener {
// Records every progress callback so tests can assert on the full sequence.
final List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
this.progresses.add(progress);
}
}
@Test
public void renameMin() {
// A rename with every optional argument omitted must answer 201 Created.
assertAsyncResponseStatusCode(
fc.renameWithResponse(null, generatePathName(), null, null, null), 201);
}
@Test
// After a rename, the destination client must serve properties (200) and the original path
// must no longer resolve.
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
// Renaming into a different file system: the destination must serve properties (200) and the
// source path must no longer resolve.
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
// Fail fast with a clear assertion instead of an NPE if creation emitted no value.
assertNotNull(newFileSystem);
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
// Expected value first, matching JUnit's assertEquals(expected, actual) convention
// and the sibling renameWithResponse test.
.assertNext(p -> assertEquals(200, p.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
public void renameError() {
// Renaming a path that was never created must surface a storage error.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier
.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
// Rename must handle percent-encoded characters in both source and destination path suffixes.
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename must succeed when all source-side access conditions are satisfied.
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// setupPath* helpers resolve sentinel values into real ETags/lease IDs on the source.
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename must fail when a source-side access condition is violated.
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
// noneMatch is resolved to the real ETag so If-None-Match is guaranteed to fail.
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename onto an existing destination must succeed when the destination-side conditions hold.
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
// Conditions are resolved against the pre-created destination file.
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename onto an existing destination must fail when a destination-side condition is violated.
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A client authorized only by a filesystem SAS (read/move/write/create/add/delete) must be able
// to rename the file, and the destination must then be readable.
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
// Fail fast with a clear assertion instead of an NPE if rename emitted no value.
assertNotNull(destClient);
StepVerifier.create(destClient.getPropertiesWithResponse(null))
// Expected value first, matching JUnit's assertEquals(expected, actual) convention.
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@Test
// Same as renameSasToken, but the SAS string carries a leading '?' which the client must strip.
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
// Fail fast with a clear assertion instead of an NPE if rename emitted no value.
assertNotNull(destClient);
StepVerifier.create(destClient.getPropertiesWithResponse(null))
// Expected value first, matching JUnit's assertEquals(expected, actual) convention.
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@Test
public void appendDataMin() {
// A bare append of the default payload at offset 0 must not throw.
assertDoesNotThrow(
() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// append must answer 202 Accepted with request-id, version, date, and server-encrypted headers.
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
// append with a correct transactional MD5 of the payload must be accepted (202).
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
// Invalid stream/length combinations must fail with the documented exception type.
StepVerifier.create(fc.append(is, 0, dataSize)).verifyError(exceptionType);
}
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
// null body -> NPE; declared size off by one in either direction -> UnexpectedLengthException.
long size = DATA.getDefaultDataSizeLong();
return Stream.of(
Arguments.of(null, size, NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), size + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), size - 1, UnexpectedLengthException.class));
}
@Test
public void appendDataEmptyBody() {
// The service rejects an append whose body is zero bytes long.
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier
.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
// A null body must be rejected client-side with a NullPointerException.
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier
.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
public void appendDataLease() {
// Appending with the correct active lease ID must be accepted (202).
String leaseId = setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
assertAsyncResponseStatusCode(
fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, leaseId), 202);
}
@Test
// Appending with the wrong lease ID while a lease is held must fail with 412.
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Gates tests that require service behavior introduced with the 2020-08-04 API version.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE on append must leave the file locked with a fixed-duration lease.
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.AUTO_RENEW on append must keep an existing lease active.
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
// LeaseAction.RELEASE with flush=true must release the held lease as part of the append.
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
// setFlush(true) is required: the lease is released on the flush that commits the data.
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE_RELEASE must take the lease for the operation and release it afterwards,
// leaving the file unlocked.
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
// Appending to a path that was never created must fail with 404.
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
// An append through a pipeline that injects transient failures must still succeed via retries,
// and the committed content must be intact.
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// append with flush=true must commit the data in one call; a read must then return the payload.
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void appendBinaryDataMin() {
// A bare BinaryData append at offset 0 must not throw.
assertDoesNotThrow(
() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// BinaryData append must answer 202 Accepted with the standard response headers.
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// BinaryData append with flush=true must be accepted with the standard response headers.
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void flushDataMin() {
// Append then flush-with-overwrite of the full payload must not throw.
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(
() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
@Test
// flush with close=true (and retainUncommittedData=false) must succeed on a fresh file.
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
// flush with retainUncommittedData=true (and close=false) must succeed on a fresh file.
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
// Flushing with a position (4) that does not match the appended data length must be rejected.
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
// HTTP headers supplied on flush must be reflected by a subsequent getProperties call;
// a null content type falls back to the service default of application/octet-stream.
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
// Effectively-final copy so the lambda below can capture it.
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// flush must succeed (200) when every supplied access condition is satisfied.
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// flush must be rejected when an access condition is violated.
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
// noneMatch is resolved to the real ETag so If-None-Match is guaranteed to fail.
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
// Use the long-valued size accessor for consistency with every other flush test
// (avoids relying on implicit int -> long widening of getDefaultDataSize()).
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void flushError() {
// Flushing a path that was never created must surface a storage error.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier
.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
@Test
// A second flush without overwrite=true over already-committed data must be rejected.
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
// Attempt to flush again with overwrite=false; the service refuses to replace committed data.
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
// The client must URL-decode the supplied name into the expected file path.
DataLakeFileAsyncClient fileClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, fileClient.getFilePath());
}
@Test
public void builderBearerTokenValidation() {
// Bearer-token credentials require HTTPS; an http endpoint must be rejected at build time.
String httpEndpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(httpEndpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
// Round-trip: upload a random file (optionally with an explicit block size), download it back,
// and verify the content is identical.
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
private static Stream<Arguments> uploadFromFileSupplier() {
// (fileSize, blockSize): default-chunked small/medium/large uploads, plus one upload
// with an explicit 4 MB block size.
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024));
}
@Test
// Metadata supplied to uploadFromFile must be stored on the path, and the uploaded bytes
// must read back identically.
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Uploading to an existing path without the overwrite flag must fail for both an
// existing destination (fc) and a freshly created one (fac).
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // FIX: register the second temp file for cleanup — it was previously created
    // inline and leaked (never deleteOnExit / never added to createdFiles).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// Uploading with overwrite=true must succeed even when the destination already exists.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // FIX: register the second temp file for cleanup — it was previously created
    // inline and leaked (never deleteOnExit / never added to createdFiles).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Progress receiver used by upload-from-file tests. Records the raw cumulative byte
 * count from each callback (unlike the block-counting reporters below) because the
 * file-upload path reads from disk with a hard-coded read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Latest cumulative byte count observed.
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
// Non-deprecated counterpart of FileUploadReporter: records the latest cumulative
// byte count delivered through the ProgressListener callback.
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
// Verifies the deprecated ProgressReceiver sees the full byte count when uploading a
// file. setMaxSingleUploadSizeLong(blockSize - 1) forces the chunked upload path.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
// The final cumulative report must equal the total file size.
assertEquals(size, uploadReporter.getReportedByteCount());
}
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    // (total upload size, block size, max concurrency)
    Arguments[] cases = {
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    };
    return Stream.of(cases);
}
// Same as uploadFromFileReporter but exercising the non-deprecated ProgressListener.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
// The final cumulative report must equal the total file size.
assertEquals(size, uploadListener.getReportedByteCount());
}
// Verifies uploadFromFile honors ParallelTransferOptions (single-shot vs. chunked)
// and that the resulting remote file has the expected size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    // (data size, max single-upload size, block size or null for the default)
    Arguments[] cases = {
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    };
    return Stream.of(cases);
}
// Same as uploadFromFileOptions but via the WithResponse overload, additionally
// asserting on the HTTP status and returned PathInfo fields.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    // Uploading a single empty buffer without overwrite should surface a service error.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Flux<ByteBuffer> emptySource = Flux.just(ByteBuffer.wrap(new byte[0]));
    StepVerifier.create(client.upload(emptySource, null))
        .verifyError(DataLakeStorageException.class);
}
// Verifies buffered upload correctly skips zero-length buffers interleaved in the
// source Flux: the downloaded content must be the concatenation of non-empty buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    // Rows: three source buffers (with the empty buffer in each position, or absent)
    // followed by the expected concatenated download.
    ByteBuffer empty = ByteBuffer.allocate(0);
    byte[] hello = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] world = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(world), "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            empty, "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), empty,
            ByteBuffer.wrap(world), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(empty, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(world), " world!".getBytes(StandardCharsets.UTF_8)));
}
// Exercises buffered (chunked) upload across block sizes and concurrency levels, then
// round-trips the data. Reads are skipped above 100 MB to keep runtime bounded.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
// Max single-upload size of 4 MB forces the chunked path for all test sizes.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    // (total data size, block/buffer size, number of buffers)
    Arguments[] cases = {
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    };
    return Stream.of(cases);
}
// Asserts that `result` is exactly the concatenation of `buffers`.
// Mutates the position/limit cursors of every buffer involved, so callers must not
// rely on those cursors afterwards.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
// Window `result` down to the next buffer-sized slice before comparing.
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
result.position(result.position() + buffer.remaining());
}
// No bytes may remain once every expected buffer has been consumed.
assertEquals(0, result.remaining());
}
// Deprecated ProgressReceiver that counts callbacks, asserting every cumulative
// report lands on a block boundary.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Non-deprecated counterpart of Reporter: counts ProgressListener callbacks,
// asserting every cumulative report lands on a block boundary.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Verifies the deprecated ProgressReceiver fires at least once per uploaded block
// during a buffered upload.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one progress report per full block.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    // (total data size, block size, max concurrency)
    Arguments[] cases = {
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    };
    return Stream.of(cases);
}
// Same as bufferedUploadWithReporter but exercising the non-deprecated ProgressListener.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one progress report per full block.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Verifies buffered upload re-chunks a source Flux whose buffer sizes do not align
// with the configured block size, then round-trips the concatenated data.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."))
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
// Source buffer sizes are given in MB and deliberately misaligned with the block size.
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    // (source buffer sizes in MB, block size in MB, number of buffers)
    Arguments[] cases = {
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    };
    return Stream.of(cases);
}
// Verifies the upload path-selection logic (single-shot vs. chunked) round-trips data
// for buffer sequences around the 4 MB single-upload threshold.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot (publish().autoConnect()) source,
// which cannot be re-subscribed for a retry of the single-shot decision.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    // Buffer-size sequences chosen around the 4 MB single-shot threshold.
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
    return cases.stream();
}
// Verifies a hot-source buffered upload survives injected transient HTTP failures;
// reads back through a separate, failure-free client.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    // Buffer-size sequences around the 4 MB single-shot threshold.
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.stream();
}
// Verifies InputStream-based upload survives injected transient failures for both the
// single-shot path (11110 bytes) and the chunked path (> 2 MB).
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
// Read back through the failure-free client and compare.
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
@Test
public void bufferedUploadIllegalArgumentsNull() {
    // A null data Flux must be rejected with a NullPointerException.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(4L)
        .setMaxConcurrency(4);
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null, transferOptions, true))
        .verifyError(NullPointerException.class);
}
// Verifies HTTP headers (cache-control, disposition, encoding, language, MD5, type)
// supplied at upload time are persisted on the path.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
// MD5 is only computed (and therefore only validated) when requested by the test row.
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
// A null content type falls back to the service default of application/octet-stream.
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    // (data size, cache-control, disposition, encoding, language, validate MD5, content type)
    Arguments[] cases = {
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    };
    return Stream.of(cases);
}
// Verifies metadata supplied to a buffered upload is persisted; null keys mean the
// corresponding pair is omitted (empty-metadata row).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies single-shot vs. chunked path selection by counting appendWithResponse calls
// through an anonymous subclass that intercepts the internal append.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Spy wrapper: counts each internal append before delegating to the real client.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
// The number of appends reveals which upload path was taken.
assertEquals(numAppends, appendCount.get());
}
// Verifies an upload carrying POSIX permissions and umask succeeds and writes all bytes.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Verifies buffered upload succeeds when every access condition (lease, ETag match,
// modified-since bounds) is satisfied.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Verifies buffered upload fails with 412 (precondition failed) when access conditions
// are deliberately unsatisfiable.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"))
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Verifies an upload with a garbage lease id fails even when the buffer pool holds
// multiple in-flight buffers (exercises pool locking with small block sizes).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
// GARBAGE_LEASE_ID never matches the real lease, so the service must reject the upload.
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    // A second upload to the same path without the overwrite flag must be rejected.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    client.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(client.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // BUG FIX: the Mono returned by uploadFromFile was never subscribed (no block()),
    // so the upload never ran and assertDoesNotThrow was vacuous. Compare the sync
    // pattern used by uploadFromFileOverwrite, which does call block().
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // FIX: register the second temp file for cleanup instead of leaking it.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
// Verifies upload works with a non-markable (non-replayable) source: a Flux read
// directly from an AsynchronousFileChannel.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
// readToFile overwrites outFile's random contents; compare against the source.
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
@Test
public void uploadInputStreamNoLength() {
    // Length is optional: the client must buffer and size the stream on its own.
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream());
    assertDoesNotThrow(() -> fc.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    // A declared length that disagrees with the actual stream size must fail the upload.
    FileParallelUploadOptions badOptions = new FileParallelUploadOptions(DATA.getDefaultInputStream(), length);
    assertThrows(Exception.class, () -> fc.uploadWithResponse(badOptions).block());
}
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    // Zero, negative, one byte short of, and one byte past the true data size.
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
// Verifies an InputStream upload retries through injected transient failures and the
// data still round-trips via the failure-free client.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Verifies uploadWithResponse accepts a BinaryData payload and the bytes round-trip.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryDataOverwrite() {
    // upload(BinaryData, ..., overwrite=true) must succeed over the existing file fc.
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Verifies the encryption context set at upload time is returned by getProperties.
// Requires service version 2021-04-10 or newer.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload (an optional header row plus numCopies copies of two fixed data
// rows) using the serialization's separators, then uploads it to fc via
// create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
// Header bytes are only included when the serialization says headers are present.
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Uploads a small JSON-like object with numCopies "nameN": "ownerN" entries to fc.
// NOTE(review): every entry, including the last, is followed by a comma, so for
// numCopies >= 1 the payload is not strictly valid JSON — confirm whether the query
// tests rely on this. flush uses b.length() (char count), which equals the byte count
// here only because the content is pure ASCII.
private void uploadSmallJson(int numCopies) {
StringBuilder b = new StringBuilder();
b.append("{\n");
for (int i = 0; i < numCopies; i++) {
b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
}
b.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(b.toString()), 0).block();
fc.flush(b.length(), true).block();
}
// Verifies a SELECT * quick query returns exactly the uploaded CSV bytes, across a
// range of row counts. Requires service version 2019-12-12 or newer.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
// Accumulate the query result pieces into one stream, then compare byte-for-byte
// against the plain download.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Verifies quick query honors input/output delimited serializations across separator
// characters and header-present combinations. Requires 2019-12-12 or newer.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// When headers are consumed on input but not emitted on output, the query result
// is the download minus the 16-byte header row written by uploadCsv.
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Arguments: recordSeparator, columnSeparator, headersPresentIn, headersPresentOut.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    return Stream.of(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    );
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Arguments: numCopies (JSON entries), recordSeparator.
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    );
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the given scenario directly in playback/record mode; in live mode retries it
 * up to 5 times, sleeping 5s between attempts to ride out transient service issues.
 *
 * <p>Fix: the original swallowed the exception after the 5th failed attempt, so a
 * scenario that never succeeded still let the test pass silently. The last failure
 * is now rethrown once retries are exhausted.</p>
 *
 * @param runnable the test scenario to execute
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            // Remember the failure; Runnable.run can only throw unchecked exceptions.
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure; // All retries failed -- surface the last error instead of passing silently.
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Arguments: deletion options, whether an expiry time is expected afterwards.
private static Stream<Arguments> scheduleDeletionSupplier() {
    return Stream.of(
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
        Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false)
    );
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
// Scheduling deletion on a path that was never created must fail.
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
// Test double that records every bytes-scanned value reported during a query.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    List<Long> progressList = new ArrayList<>();
    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
// Test double that asserts every received query error is non-fatal and of the
// expected type, counting occurrences for later assertions.
static class MockErrorReceiver implements Consumer<FileQueryError> {
    String expectedType;
    int numErrors;
    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }
    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// Deliberately unsupported FileQuerySerialization implementation, used to verify
// the client rejects unknown serialization types.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
// Uploading without overwrite=true onto an existing file must fail.
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
@Test
// Upload with overwrite=true, then read back and compare bytes.
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: dataSize, singleUploadSize, blockSize, expected number of append calls.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    return Stream.of(
        Arguments.of((100 * Constants.MB) - 1, null, null, 1),
        // Just over the single-shot threshold: chunked into default 4 MB blocks.
        Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5)
    );
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
// A per-call pipeline policy must pin the x-ms-version header on every request.
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
} |
Similar comment as elsewhere, don't put StepVerifier in StepVerifier. | public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
} | .verifyComplete(); | public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.verifyError(IOException.class);
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
// Creates a fresh file client backed by a newly created file for each test.
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
// Minimal create must emit a non-null result.
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        .assertNext(r -> assertNotEquals(null, r))
        .verifyComplete();
}
@Test
// Create with all-default options must return 201 with standard headers.
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
@Test
// Create with an unmatchable ETag condition must fail.
public void createError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
        .verifyError(DataLakeStorageException.class);
}
@Test
// create(false) on an existing file must fail.
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
@Test
// exists() is true for a created file.
public void exists() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
@Test
// exists() is false for a never-created path.
public void doesNotExist() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
@ParameterizedTest
// Fix: added nullValues = "null" so the first row injects real nulls rather than
// literal "null" strings, matching the sibling createMetadata test and the
// (contentType == null) default below, which only fires for an actual null.
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"},
    nullValues = "null")
// Creates a file with the given HTTP headers and verifies they round-trip through
// getProperties; a null content type defaults to application/octet-stream.
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Creates a file with the given metadata pairs and verifies they round-trip.
public void createMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc.createWithResponse(null, null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2021-04-10.
private static boolean olderThan20210410ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Create must succeed (201) when every supplied access condition matches.
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Arguments: ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseId --
// each row exercises one matching condition.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(null, null, null, null, null),
        Arguments.of(OLD_DATE, null, null, null, null),
        Arguments.of(null, NEW_DATE, null, null, null),
        Arguments.of(null, null, RECEIVED_ETAG, null, null),
        Arguments.of(null, null, null, GARBAGE_ETAG, null),
        Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
    );
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Create must fail when any supplied access condition does not match.
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Arguments: ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseId --
// each row exercises one failing condition.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
    return Stream.of(
        Arguments.of(NEW_DATE, null, null, null, null),
        Arguments.of(null, OLD_DATE, null, null, null),
        Arguments.of(null, null, GARBAGE_ETAG, null, null),
        Arguments.of(null, null, null, RECEIVED_ETAG, null),
        Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
    );
}
@Test
// Create with explicit POSIX permissions (0777) and umask (0057) must return 201.
public void createPermissionsAndUmask() {
    assertAsyncResponseStatusCode(fc.createWithResponse(
        "0777", "0057", null, null, null), 201);
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
// Creating without explicit owner/group must default both to "$superuser".
public void createOptionsWithNullOwnerAndGroup() {
    // Fix: the returned Mono was never subscribed, so the create request was never
    // sent; added the missing block().
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
// Create with HTTP headers supplied via DataLakePathCreateOptions must return 201.
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Create with metadata supplied via options; verifies every supplied pair is present
// (the service may add its own entries, so only containment is checked).
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
@Test
// Permissions 0777 masked by umask 0057 must yield effective permissions rwx-w----.
public void createOptionsWithPermissionsAndUmask() {
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
    // A proposed lease id without an accompanying lease duration is rejected.
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setProposedLeaseId(CoreUtils.randomUuid().toString());

    StepVerifier.create(fc.createWithResponse(opts, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
    // Creating with a 15 second lease should leave the path leased with a fixed duration.
    String proposedLeaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setLeaseDuration(15)
        .setProposedLeaseId(proposedLeaseId);

    assertAsyncResponseStatusCode(fc.createWithResponse(opts, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.LOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, properties.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, properties.getLeaseDuration());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    // Both an absolute expiry schedule and a null schedule are accepted on create.
    assertAsyncResponseStatusCode(
        fc.createWithResponse(new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions), null),
        201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
    // An absolute expiry one day out, plus the "no deletion schedule" case.
    DataLakePathScheduleDeletionOptions absoluteExpiry =
        new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
    return Stream.of(absoluteExpiry, null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
    // Schedule deletion six days after creation and verify the computed expiry time.
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(new DataLakePathScheduleDeletionOptions(Duration.ofDays(6)));

    assertAsyncResponseStatusCode(fc.createWithResponse(opts, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties ->
            compareDatesWithPrecision(properties.getExpiresOn(), properties.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void createIfNotExistsMin() {
    // createIfNotExists on a brand-new path should create it.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    StepVerifier.create(fc.createIfNotExists().then(fc.exists()))
        .expectNext(true)
        .verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
    // Default options yield a 201 and the standard creation headers.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
        .assertNext(response -> {
            assertEquals(201, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions();

    // First call creates the file...
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, null), 201);
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();

    // ...the second call reports a conflict (409) instead of overwriting.
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, null), 409);
}
@Test
public void createIfNotExistsExists() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();

    // Verify via StepVerifier (consistent with createIfNotExistsMin) instead of
    // assertTrue(fc.exists().block()): block() on an empty Mono returns null, and
    // unboxing that null would throw an NPE that masks the real assertion failure.
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
@ParameterizedTest
// BUGFIX: nullValues = "null" was missing (unlike every sibling @CsvSource here),
// so the first row passed the literal string "null" for each header and the
// null/default-content-type branch below was never actually exercised.
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"},
    nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // The service defaults the content type when none is supplied.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();

    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> md = new HashMap<>();
    if (key1 != null) {
        md.put(key1, value1);
    }
    if (key2 != null) {
        md.put(key2, value2);
    }

    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(md), Context.NONE).block();

    // The created file should report exactly the metadata that was supplied.
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(md, properties.getMetadata()))
        .verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
    // Permissions plus umask are accepted on an if-not-exists create.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setPermissions("0777")
        .setUmask("0057");

    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();

    // The encryption context must surface through getProperties, read, and listPaths.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // BUGFIX: this StepVerifier was never terminated with verifyComplete(), so the
    // publisher was never subscribed and the read-path assertion never executed.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // Skip the directory entry, then assert on the file entry.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> acl =
        PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setAccessControlList(acl), null).block();

    // Only the first two entries (user, group) are asserted here.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(result -> {
            assertEquals(acl.get(0), result.getAccessControlList().get(0));
            assertEquals(acl.get(1), result.getAccessControlList().get(1));
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String owner = testResourceNamer.randomUuid();
    String group = testResourceNamer.randomUuid();
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setOwner(owner).setGroup(group), null).block();

    // Owner and group set at creation should be reported back verbatim.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(acl -> {
            assertEquals(owner, acl.getOwner());
            assertEquals(group, acl.getGroup());
        })
        .verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    // With owner/group left null, both report the service default "$superuser".
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setOwner(null).setGroup(null), null).block();

    StepVerifier.create(fc.getAccessControl())
        .assertNext(acl -> {
            assertEquals("$superuser", acl.getOwner());
            assertEquals("$superuser", acl.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // Assemble the headers one setter at a time rather than as a single chain.
    PathHttpHeaders httpHeaders = new PathHttpHeaders();
    httpHeaders.setCacheControl(cacheControl);
    httpHeaders.setContentDisposition(contentDisposition);
    httpHeaders.setContentEncoding(contentEncoding);
    httpHeaders.setContentLanguage(contentLanguage);
    httpHeaders.setContentMd5(contentMD5);
    httpHeaders.setContentType(contentType);

    assertAsyncResponseStatusCode(
        fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(httpHeaders), null),
        201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // Build the metadata map, skipping any pair where either half is absent.
    Map<String, String> md = new HashMap<>();
    if (key1 != null && value1 != null) {
        md.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        md.put(key2, value2);
    }

    assertAsyncResponseStatusCode(
        fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(md), null), 201);

    // Every supplied entry must round-trip onto the created path.
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> md.forEach((k, v) -> {
            assertTrue(properties.getMetadata().containsKey(k));
            assertEquals(v, properties.getMetadata().get(k));
        }))
        .verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    // Effective permissions should be the symbolic form "rwx-w----".
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(
        new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057"), null).block();

    String expected = PathPermissions.parseSymbolic("rwx-w----").toString();
    StepVerifier.create(fc.getAccessControlWithResponse(true, null, null))
        .assertNext(response -> assertEquals(expected, response.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    // A proposed lease id plus a duration is accepted on an if-not-exists create.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setProposedLeaseId(testResourceNamer.randomUuid())
        .setLeaseDuration(15);

    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    // A proposed lease id without a lease duration is rejected.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setProposedLeaseId(CoreUtils.randomUuid().toString());

    StepVerifier.create(fc.createIfNotExistsWithResponse(opts, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    // A 15 second lease at creation should leave the file leased with a fixed duration.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setLeaseDuration(15)
        .setProposedLeaseId(CoreUtils.randomUuid().toString());

    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.LOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, properties.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, properties.getLeaseDuration());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    // Both an absolute expiry schedule and a null schedule are accepted.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    assertAsyncResponseStatusCode(
        fc.createIfNotExistsWithResponse(
            new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions), null),
        201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    // Schedule deletion six days after creation and verify the computed expiry time.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions opts = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(new DataLakePathScheduleDeletionOptions(Duration.ofDays(6)));

    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(opts, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties ->
            compareDatesWithPrecision(properties.getExpiresOn(), properties.getCreationTime().plusDays(6)))
        .verifyComplete();
}
// Deleting an existing file with no options or conditions returns HTTP 200.
@Test
public void deleteMin() {
    assertAsyncResponseStatusCode(fc.deleteWithResponse(
        null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
    fc.deleteWithResponse(null, null, null).block();

    // After deletion, property retrieval fails with 404 / BlobNotFound.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(error -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(error, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.deleteWithResponse(conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.deleteWithResponse(conditions))
        .verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file completes with true.
@Test
public void deleteIfExists() {
    StepVerifier.create(fc.deleteIfExists())
        .expectNext(true)
        .verifyComplete();
}
// Minimal deleteIfExists call: an existing file yields HTTP 200.
@Test
public void deleteIfExistsMin() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// Once deleted, the path is gone and a subsequent property fetch errors out.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}
// First delete succeeds (200); repeating it reports 404 since the file is gone.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    DataLakePathDeleteOptions opts = new DataLakePathDeleteOptions()
        .setIsRecursive(false)
        .setRequestConditions(conditions);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(opts, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    DataLakePathDeleteOptions opts = new DataLakePathDeleteOptions().setRequestConditions(conditions);
    StepVerifier.create(fc.deleteIfExistsWithResponse(opts, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // A successful permissions update returns a fresh ETag and last-modified time.
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
// setPermissionsWithResponse on an existing file returns HTTP 200.
@Test
public void setPermissionsWithResponse() {
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, conditions))
        .verifyError(DataLakeStorageException.class);
}
// Setting permissions on a path that was never created fails with a storage exception.
@Test
public void setPermissionsError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // A successful ACL update returns a fresh ETag and last-modified time.
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
// setAccessControlListWithResponse on an existing file returns HTTP 200.
@Test
public void setACLWithResponse() {
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(
        fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, conditions))
        .verifyError(DataLakeStorageException.class);
}
// Setting the ACL on a path that was never created fails with a storage exception.
@Test
public void setACLError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Gate for tests that require the 2020-02-10 service version or newer.
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    // Recursing over a single file: one changed file, no directories, no failures.
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(result -> {
            assertEquals(0L, result.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, result.getCounters().getChangedFilesCount());
            assertEquals(0L, result.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    // Recursing over a single file: one changed file, no directories, no failures.
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(result -> {
            assertEquals(0L, result.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, result.getCounters().getChangedFilesCount());
            assertEquals(0L, result.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    String entries = "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
        + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
        + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a";

    // Removing entries from a single file touches exactly that one file.
    StepVerifier.create(fc.removeAccessControlRecursive(PathRemoveAccessControlEntry.parseList(entries)))
        .assertNext(result -> {
            assertEquals(0L, result.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, result.getCounters().getChangedFilesCount());
            assertEquals(0L, result.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@Test
public void getAccessControlMin() {
    // All four facets of the access control response should be populated.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(acl -> {
            assertNotNull(acl.getAccessControlList());
            assertNotNull(acl.getPermissions());
            assertNotNull(acl.getOwner());
            assertNotNull(acl.getGroup());
        })
        .verifyComplete();
}
// Fetching access control without UPN translation returns HTTP 200.
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}
// Fetching access control with UPN translation enabled also returns HTTP 200.
@Test
public void getAccessControlReturnUpn() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(false, conditions, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    // The garbage-lease-id case is skipped for this operation (see supplier usage).
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }

    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.getAccessControlWithResponse(false, conditions, null))
        .verifyError(DataLakeStorageException.class);
}
// Sanity-checks the full default property surface of a freshly created file.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            // Standard response headers plus byte-range support.
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            // Core properties are always populated.
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            // No content headers were supplied at creation.
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            // No lease has been taken on the file.
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            // The file was never the target of a copy operation.
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            // Default tier, no metadata, no customer-provided encryption key.
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
// Minimal property fetch on an existing file returns HTTP 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Note: unlike the other *ACFail tests, the lease id is resolved against the
    // live path here; the etag conditions are the ones expected to mismatch.
    String lease = setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.getPropertiesWithResponse(conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    // The error must be a storage exception carrying the BlobNotFound error code.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageError = assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageError.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Passing null headers is allowed and still returns the standard headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    // Re-apply every current header value, changing only the content type.
    PathProperties current = fc.getProperties().block();
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(current.getContentEncoding())
        .setContentDisposition(current.getContentDisposition())
        .setContentType("type")
        .setCacheControl(current.getCacheControl())
        .setContentLanguage(current.getContentLanguage())
        .setContentMd5(md5);
    fc.setHttpHeaders(headers).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals("type", properties.getContentType()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUGFIX: append/flush return cold Monos; without block() nothing was subscribed,
    // so the file content was never written (compare readAllNull, which blocks), and
    // the MD5 validation below could not be meaningful.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();

    // Every header set above should be reported back by getProperties.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    // All-null headers, and a full set including the MD5 of the default test data.
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", md5, "type"));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.setHttpHeadersWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
// Setting headers on a path that was never created fails with a storage exception.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single-entry metadata map should round-trip through getProperties.
    Map<String, String> md = Collections.singletonMap("foo", "bar");
    fc.setMetadata(md).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(md, properties.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    // Build the metadata map, skipping any pair where either half is absent.
    Map<String, String> md = new HashMap<>();
    if (key1 != null && value1 != null) {
        md.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        md.put(key2, value2);
    }

    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(md, null), statusCode);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(md, properties.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Resolve the placeholder lease/etag values against the live path first.
    String lease = setupPathLeaseCondition(fc, leaseID);
    String etag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(lease);
    conditions.setIfMatch(etag);
    conditions.setIfNoneMatch(noneMatch);
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Take out a real lease where requested, but deliberately send mismatching conditions.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    conditions.setLeaseId(leaseID);
    conditions.setIfMatch(match);
    conditions.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch));
    conditions.setIfModifiedSince(modified);
    conditions.setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.setMetadataWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Uploads the default test payload and verifies that a bare read() streams back
// exactly the bytes that were written.
// BUG FIX: the annotation was duplicated (@Test twice). @Test is not a
// repeatable annotation, so the duplicate is a compile error.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Arguments for readRange: (offset, count, expected substring of the default text).
// A null count means "read to the end of the file".
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        Arguments.of(0L, null, DATA.getDefaultText()),
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
// readToFile without the overwrite flag must fail with a FileAlreadyExistsException
// (wrapped in UncheckedIOException) when the destination file already exists on disk.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // BUG FIX: append/flush are cold reactive calls and were never subscribed
    // (missing .block()), so no data was uploaded before the read attempt.
    // Every sibling test in this file blocks on these calls.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes (bytes) for the parallel-download tests: tiny, block-aligned,
// deliberately non-aligned, and large.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(
        20,
        16 * 1024 * 1024,
        // NOTE(review): 1026 (not 1024) appears to be intentional, producing a
        // size that doesn't align to block boundaries — confirm, same constant
        // is used in the progress tests.
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges for downloadFileRange: full file, all-but-first byte, small interior slice,
// all-but-last byte, and a count that extends past the end of the data.
private static Stream<FileRange> downloadFileRangeSupplier() {
    return Stream.of(
        new FileRange(0, DATA.getDefaultDataSizeLong()),
        new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(3, 2L),
        new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(0, 10 * 1024L)
    );
}
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
@SuppressWarnings("deprecation")
// Test double for the deprecated ProgressReceiver API; records every reported
// byte count so tests can assert on the full progress sequence.
private static final class MockReceiver implements ProgressReceiver {
    // All progress values in the order they were reported.
    List<Long> progresses = new ArrayList<>();
    @Override
    public void reportProgress(long bytesTransferred) {
        progresses.add(bytesTransferred);
    }
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener; records every reported progress value so
// tests can assert on the full progress sequence.
private static final class MockProgressListener implements ProgressListener {
    // All progress values in the order they were reported.
    List<Long> progresses = new ArrayList<>();
    @Override
    public void handleProgress(long progress) {
        progresses.add(progress);
    }
}
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Renames files whose source and/or destination names contain URL-encoded characters
// ("%20%25" = encoded space + percent) and verifies the renamed file is reachable.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
        null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // BUG FIX: the original chained .flatMap(piece -> { ...; return null; })
            // but never subscribed, so the assertion never executed; returning null
            // from flatMap also violates the Reactor contract. A nested StepVerifier
            // actually runs and verifies the properties call.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Condition method for @DisabledIf: true when the targeted service version predates
// 2020-08-04, which introduced the lease-action support exercised by the tests below.
private static boolean olderThan20200804ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE should acquire a lease for the append and release it on the
// flush, leaving the path UNLOCKED/AVAILABLE.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// Appending to a path that was never created must fail with a 404 DataLakeStorageException.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(404, e.getResponse().getStatusCode());
        });
}
// A client whose pipeline injects transient failures should still complete the append via
// retries; the flushed content must match what was uploaded.
@Test
public void appendDataRetryOnTransientFailure() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// append with setFlush(true) should commit the data in one call: 202 with standard headers,
// and a subsequent read returns the uploaded bytes without a separate flush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Smoke test: a minimal BinaryData append must not throw.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData append should return 202 with the standard request/version/date/encryption headers.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// BinaryData append with setFlush(true) should also return 202 with the standard headers.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Smoke test: flushing previously appended data must not throw.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// flushWithResponse with close=true (third argument) must succeed on freshly appended data.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
// flushWithResponse with retainUncommittedData=true (second argument) must succeed.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
// Flushing with a position (4) that does not match the appended length must be rejected
// by the service with a DataLakeStorageException.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
        null))
        .verifyError(DataLakeStorageException.class);
}
// Uploads data, flushes with explicit HTTP headers, and verifies the headers round-trip
// through getProperties.
// FIX: without nullValues = "null" the CsvSource "null" tokens are delivered as the literal
// string "null", so the (contentType == null) default below could never trigger. Sibling
// bufferedUploadMetadata already uses nullValues = "null"; this makes the two consistent.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    // The service reports a default content type when none was set on the flush.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
// Flush must succeed (200) when all supplied access conditions (lease, ETag match,
// modified-since bounds) are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
// Flush must fail when invalid access conditions (lease, ETag match, modified-since bounds)
// are supplied.
// FIX: use the long-valued DATA.getDefaultDataSizeLong() for consistency with flushAC and
// the other flush tests (same value, widened type).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created must fail with a DataLakeStorageException.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true))
        .verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false onto already-flushed data must be rejected.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// Building a client from a raw or percent-encoded file name should yield the decoded path,
// including names with special characters and non-ASCII text.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, client.getFilePath());
}
// The builder must reject token credentials over plain http (bearer tokens require https).
@Test
public void builderBearerTokenValidation() {
    String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(endpoint)
        .buildFileAsyncClient());
}
// Round-trips a local file of the given size through uploadFromFile/readToFile and compares
// the two files byte-for-byte.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy of the file —
// confirm the full method reference against source control.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fac.uploadFromFile(file.getPath(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
        .verifyComplete();
    File outFile = new File(file.getPath() + "result");
    assertTrue(outFile.createNewFile());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    StepVerifier.create(fac.readToFile(outFile.getPath(), true))
        .expectNextCount(1)
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
/**
 * Cases for {@code uploadFromFile}: (file size in bytes, optional block-size override).
 * A null block size exercises the client's default chunking.
 */
private static Stream<Arguments> uploadFromFileSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024));
    return cases.stream();
}
// uploadFromFile with metadata should persist the metadata and upload the file contents intact.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Files.readAllBytes can't throw checked exceptions out of the lambda.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// uploadFromFile without overwrite must fail against an existing path — both for the shared
// fc and for a freshly created file.
// FIX: the second random file was created inline and never registered for cleanup; it is now
// tracked via deleteOnExit/createdFiles like every other temp file in these tests.
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// uploadFromFile with overwrite=true must succeed against an existing path.
// FIX: the second random file was created inline and never registered for cleanup; it is now
// tracked via deleteOnExit/createdFiles like every other temp file in these tests.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
 * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
 * read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Last byte count delivered by the upload; overwritten on each callback.
    private long reportedByteCount;
    @Override
    public void reportProgress(long bytesTransferred) {
        this.reportedByteCount = bytesTransferred;
    }
    // Returns the most recently reported byte count.
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// ProgressListener counterpart of FileUploadReporter for the non-deprecated progress API.
private static final class FileUploadListener implements ProgressListener {
    // Last byte count delivered by the upload; overwritten on each callback.
    private long reportedByteCount;
    @Override
    public void handleProgress(long bytesTransferred) {
        this.reportedByteCount = bytesTransferred;
    }
    // Returns the most recently reported byte count.
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// The deprecated ProgressReceiver hook should see the full file size reported by the end of
// an uploadFromFile.
// NOTE(review): the @EnabledIf condition string appears truncated in this copy of the file.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // Force chunked upload by making the single-upload cutoff smaller than the block size.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadReporter.getReportedByteCount());
}
/**
 * Progress-reporting cases: (total size, block size, buffer count), covering single-block,
 * many-small-block, and high-concurrency uploads.
 */
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
    return cases.stream();
}
// ProgressListener counterpart of uploadFromFileReporter: the listener should see the full
// file size reported by the end of an uploadFromFile.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // Force chunked upload by making the single-upload cutoff smaller than the block size.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadListener.getReportedByteCount());
}
// uploadFromFile honoring singleUploadSize/blockSize options should still produce a file of
// the expected total size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
/**
 * Cases of (data size, single-upload cutoff, optional block size); data size exceeds the
 * cutoff in both cases so the chunked path is exercised.
 */
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L));
    return cases.stream();
}
// uploadFromFileWithResponse should return 200 with ETag/lastModified populated, and the
// resulting file should have the expected size.
// FIX: the final StepVerifier chain never called verifyComplete(), so the getProperties
// subscription never ran and the file-size assertion was silently skipped.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Uploading a single empty buffer to an uncreated path should surface a DataLakeStorageException.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
        .verifyError(DataLakeStorageException.class);
}
// Empty buffers interleaved with non-empty ones should be skipped cleanly: the download must
// equal the concatenation of the non-empty buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
    byte[] expectedDownload) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
        null, true))
        .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
        .verifyComplete();
}
/**
 * Cases for {@code asyncBufferedUploadEmptyBuffers}: three buffers to upload plus the bytes
 * expected on download, with the empty buffer rotated through each position.
 */
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    List<Arguments> cases = Arrays.asList(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8)));
    return cases.stream();
}
// Buffered upload of random data with various block sizes/concurrency; downloads under
// 100 MB are read back and compared to the source bytes.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    // Skip the read-back verification for the largest payloads to keep runtime bounded.
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
/**
 * Cases for {@code asyncBufferedUpload}: (total size, block size, concurrent buffers).
 */
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));
    return cases.stream();
}
/**
 * Asserts that {@code result} is exactly the concatenation of {@code buffers}, by windowing
 * the result to each expected chunk in turn and requiring nothing to be left over.
 */
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer expected : buffers) {
        expected.position(0);
        int chunkLength = expected.remaining();
        // Limit the result view to the span this chunk should occupy.
        result.limit(result.position() + chunkLength);
        TestUtils.assertByteBuffersEqual(expected, result);
        result.position(result.position() + chunkLength);
    }
    // Every byte of the result must belong to some expected chunk.
    assertEquals(0, result.remaining());
}
// Deprecated-API progress receiver: asserts each reported byte count is a multiple of the
// block size and counts how many callbacks fired. reportingCount is read directly by tests.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;
    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void reportProgress(long bytesTransferred) {
        // Only enforced when assertions are enabled (-ea).
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// ProgressListener counterpart of Reporter for the non-deprecated progress API.
// reportingCount is read directly by tests.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;
    Listener(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void handleProgress(long bytesTransferred) {
        // Only enforced when assertions are enabled (-ea).
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// Buffered upload with a deprecated ProgressReceiver: expects 200 and at least one progress
// callback per uploaded block.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
/**
 * Progress cases for buffered upload: (total size, block size, buffer count).
 */
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20));
    return cases.stream();
}
// ProgressListener counterpart of bufferedUploadWithReporter: expects 200 and at least one
// progress callback per uploaded block.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Uploads a source emitted as multiple ByteBuffers of varying sizes (in MB) and verifies the
// download equals their concatenation, exercising re-chunking across buffer boundaries.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
/**
 * Cases: (per-buffer sizes in MB, block size in MB, concurrency) — covering buffers smaller
 * than, equal to, and larger than the block size.
 */
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
    return cases.stream();
}
// Uploads a cold Flux of buffers and verifies the download equals their concatenation,
// including sizes at and around the 4 MB single-upload cutoff.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot Flux (publish().autoConnect()), so the
// client cannot re-subscribe to the source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
/**
 * Buffer-size lists straddling the 4 MB single-upload cutoff: mixed small sizes, one byte
 * over the cutoff, two exactly at it, and a single buffer exactly at it.
 */
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
    return cases.stream();
}
// Hot-Flux upload through a pipeline that injects transient failures: retries must still
// produce a download equal to the concatenated source buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // Read back through a clean client so the verification is not affected by injected failures.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
/**
 * Buffer-size lists for the transient-failure hot-Flux test, straddling the 4 MB cutoff.
 */
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.stream();
}
// InputStream-based parallel upload through a failure-injecting pipeline: retries must still
// produce a byte-identical download, both below and above the 2 MB single-upload cutoff.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with a NullPointerException signaled through the Mono.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// Buffered upload with explicit PathHttpHeaders (optionally including a content MD5) should
// round-trip all headers through getProperties.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    // MD5 is only set when the case asks for content validation.
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
/**
 * Header cases: (size, cacheControl, disposition, encoding, language, validateContentMD5,
 * contentType), covering small/large payloads with and without explicit headers.
 */
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"));
    return cases.stream();
}
// Buffered upload with metadata (including the no-metadata case via nullValues) should
// persist exactly the supplied key/value pairs.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// Verifies the single-shot vs. chunked decision by counting appendWithResponse calls through
// a spying subclass, and checks the final file size.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Hand-rolled spy: counts appends while delegating to the real implementation.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // A garbage lease id guarantees the service rejects the upload.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(client, GARBAGE_LEASE_ID));
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(client.uploadWithResponse(Flux.just(getRandomData(10)), transferOptions, null, null,
            conditions))
        .verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@Test
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // First upload succeeds; a second upload without overwrite=true must be rejected client-side.
    client.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(client.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
    // Bug fix: the second random file was created inline and never registered with createdFiles
    // or deleteOnExit, leaking a temp file after the test run. Track it like the first one.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
@Test
public void uploadInputStreamNoLength() {
    // Uploading a stream without an explicit length must succeed...
    assertDoesNotThrow(
        () -> fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    // ...and reading back must yield exactly the uploaded bytes.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
/**
 * Uploads a CSV payload to {@code fc}: an optional header row followed by {@code numCopies}
 * copies of a fixed two-record body, using the separators configured on {@code s}.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    // NOTE(review): getBytes() uses the platform default charset; content is ASCII so this is safe.
    byte[] headers = header.getBytes();
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    // Header bytes are only included when the serialization declares headers present.
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Copy the record body numCopies times, offset past the (optional) header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    // Recreate the file and write the whole payload as a single append + flush.
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Uploads a small JSON-like document of numCopies "nameN": "ownerN" entries to fc.
// NOTE(review): the generated document keeps a trailing comma before the closing brace, so it
// is not strict JSON — the query service appears to tolerate this; confirm it is intentional.
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    b.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(b.toString()), 0).block();
    fc.flush(b.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        // Consistency: use the simple class name like sibling tests; the fully-qualified
        // com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver added nothing.
        MockProgressReceiver mockReceiver = new MockProgressReceiver();
        FileQueryOptions options =
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        // The progress callbacks must at some point report the entire blob as scanned.
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@Test
public void queryMultipleRecordsWithProgressReceiver() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    String expression = "SELECT * from BlobStorage";
    uploadCsv(ser, 512000);
    liveTestScenarioWithRetry(() -> {
        // Consistency: use the simple class name like sibling tests; the fully-qualified
        // reference added nothing. Also rename the vague accumulator "temp".
        MockProgressReceiver mockReceiver = new MockProgressReceiver();
        FileQueryOptions options =
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        // Progress must be monotonically non-decreasing across callbacks.
        long previousProgress = 0;
        for (long progress : mockReceiver.progressList) {
            assertTrue(progress >= previousProgress,
                "Expected progress to be greater than or equal to previous progress.");
            previousProgress = progress;
        }
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    // Apply the unrecognized serialization to exactly one side of the query.
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    // Removed a stale commented-out StepVerifier variant that duplicated this assertion.
    liveTestScenarioWithRetry(() -> assertThrows(IllegalArgumentException.class,
        () -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(inSer)
            .setOutputSerialization(outSer)).block()));
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// True when the targeted service version predates 2020-10-02; used to skip tests that
// require that version (e.g. parquet input rejection).
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Point at a path that was never created so the query fails server-side.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() ->
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class));
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the given test action, retrying against the live service.
 * <p>
 * In playback/record mode the action runs exactly once. In live mode transient failures are
 * retried up to 5 times with a 5 second pause between attempts.
 * <p>
 * Bug fix: previously, when all 5 attempts failed the loop simply exited and the method
 * returned normally, silently swallowing the failure and letting a broken test pass. The last
 * failure is now rethrown.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    // Runnable.run() cannot throw checked exceptions, so RuntimeException covers everything the
    // old "catch (Exception)" did. AssertionError still propagates immediately, as before.
    RuntimeException lastFailure = null;
    for (int attempt = 0; attempt < 5; attempt++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            lastFailure = ex;
            // Pause to let transient service issues clear before retrying.
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure;
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// True when the targeted service version predates 2019-12-12; used to skip tests that
// require that version (query and scheduled-deletion APIs).
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
    // Scheduling deletion on a file that does not exist must surface a storage error.
    FileScheduleDeletionOptions deletionOptions =
        new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient missingFile = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(missingFile.scheduleDeletionWithResponse(deletionOptions))
        .verifyError(DataLakeStorageException.class);
}
/**
 * Test helper that records every progress callback raised by a file query so assertions can
 * be made about the bytes-scanned sequence.
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    // final: the reference never changes; callbacks only append. Kept package-visible because
    // tests read it directly (e.g. progressList.contains(...)).
    final List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Test helper that asserts every received query error is non-fatal and carries the expected
 * error name, counting how many were seen.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    final String expectedType; // name every received error must carry; never reassigned
    int numErrors; // count of matching errors; read directly by tests

    MockErrorReceiver(String expectedType) {
        // Fields default to 0, so the redundant "numErrors = 0" assignment was dropped.
        this.expectedType = expectedType;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// The path already exists (created in setup), so an upload without the overwrite flag must fail.
@Test
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
    // Overwrite the existing file, then verify the read-back bytes match what was uploaded.
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] downloaded = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase#isLiveMode") // TODO(review): condition reconstructed from truncated source — verify
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
public void perCallPolicy() {
    DataLakeFileAsyncClient versionedClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    // The per-call policy pins every request to service version 2019-02-02.
    String propertiesVersion = versionedClient.getPropertiesWithResponse(null).block()
        .getHeaders().getValue(X_MS_VERSION);
    assertEquals("2019-02-02", propertiesVersion);
    String aclVersion = versionedClient.getAccessControlWithResponse(false, null).block()
        .getHeaders().getValue(X_MS_VERSION);
    assertEquals("2019-02-02", aclVersion);
}
}
public class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
// Creates a fresh file under test before each run; blocking is acceptable in test setup.
@BeforeEach
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
// Deletes any local temp files registered by tests; File.delete return values are
// intentionally ignored (best-effort cleanup).
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
    createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // A minimal create emits exactly one non-null item and then completes.
    StepVerifier.create(fc.create())
        .assertNext(pathInfo -> assertNotNull(pathInfo))
        .verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
// create(false) on an already-existing path must fail rather than overwrite it.
@Test
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
    // exists() reports true for a file that was just created.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .assertNext(present -> assertEquals(true, present))
        .verifyComplete();
}
@Test
public void doesNotExist() {
    // exists() reports false for a path that was never created on the service.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .assertNext(present -> assertEquals(false, present))
        .verifyComplete();
}
// Bug fix: the @CsvSource lacked nullValues = "null", so the first row passed the
// literal string "null" for every header and the null-header path (including the
// contentType default branch below) was never exercised. Every other @CsvSource in
// this class declares nullValues = "null"; this row now genuinely tests defaults.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service reports a default content type when none was supplied at create time.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied at create time should round-trip unchanged through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Guard: encryption context requires service version 2021-04-10 or newer.
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
// The encryption context set at create time must be visible via getProperties,
// via the read response headers, and via listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// expectNextCount(1) skips the directory created above; the assertNext then
// inspects the file entry.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// Create succeeds (201) when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// One condition per row, the rest null; RECEIVED_* sentinels are resolved by the
// setupPath*Condition helpers against the live resource.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
// Create must fail when any access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Mirror of the passing supplier with each condition made unsatisfiable.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
// Octal permissions and umask are accepted on create.
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// Guard: ACL/owner/lease-on-create options require service version 2020-12-06+.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
// An ACL supplied through DataLakePathCreateOptions is applied to the new file.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only the user and group entries are checked; the service may normalize the rest.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied at create time are honored.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// When no owner/group is supplied the service assigns "$superuser" to both.
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // Bug fix: the Mono returned by createWithResponse was never subscribed, so the
    // create call was silently a no-op (the test only passed because setup() already
    // created the file). block() makes the call actually execute, matching the
    // createIfNotExistsOptionsWithNullOwnerAndGroup twin.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// HTTP headers supplied via DataLakePathCreateOptions are accepted (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied via options is present on the created file (the service may add
// its own entries, so containment rather than equality is asserted).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 should yield rwx-w---- on the file.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id together with a duration is accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id without a duration must be rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A fixed-duration lease acquired at create time shows up in getProperties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// An absolute expiry (or no schedule at all) is accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// A relative time-to-expire is converted to an absolute expiry based on creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists() on a fresh path creates it.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options succeed with 201 and the usual headers.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Second createIfNotExists on the same path is a no-op reported as 409.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// The file exists after a createIfNotExists call.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// Bug fix: same defect as createHeaders — without nullValues = "null" the first row
// passed the literal string "null" for each header, so the null/default branch
// (including the contentType fallback) was never exercised.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service reports a default content type when none was supplied.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied to createIfNotExists round-trips through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Octal permissions + umask are accepted by createIfNotExists.
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// Encryption context set via createIfNotExists is visible through getProperties,
// read headers, and listPaths (mirrors createEncryptionContext).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// expectNextCount(1) skips the directory; assertNext then inspects the file entry.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// ACL supplied through options is applied by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only the user and group entries are checked; the service may normalize the rest.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied through options are honored by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Explicit null owner/group falls back to the service default "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// HTTP headers supplied via options are accepted by createIfNotExists (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName())
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata supplied via options is present on the created file (containment check).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 yield rwx-w---- on the file.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id plus a duration is accepted by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id without a duration must be rejected.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A fixed-duration lease acquired via createIfNotExists shows up in getProperties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// An absolute expiry (or none) is accepted by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative time-to-expire becomes an absolute expiry based on creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// A plain delete returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After delete, getProperties reports 404 / BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists reports true when the file was actually deleted.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties must fail.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on the same path is a no-op reported as 404.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns updated ETag/last-modified metadata.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a nonexistent path fails.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns updated ETag/last-modified metadata.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when any access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a nonexistent path fails.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Resolve the real lease id and ETag for this file before building the
    // conditions; the raw parameters are placeholders the helpers translate.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // All supplied condition combinations are satisfiable, so the call succeeds.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(false, conditions, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
    // The garbage-lease-id combination is excluded from this negative test.
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    // Acquire a real lease on the path, then deliberately send the unresolved
    // (wrong) lease id together with an If-None-Match that matches the current ETag.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, conditions, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesDefault() {
// Verifies the full shape of a default getProperties response for a freshly
// created, never-written, never-leased, never-copied file: timestamps/ETag
// present, all optional content headers and copy/lease metadata absent.
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
// Always-present basics.
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
// No explicit content headers were ever set on this file.
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
// No lease has been taken.
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
// The file was never the destination of a copy operation.
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
@Test
public void getPropertiesMin() {
    // Minimal properties fetch (no request conditions) succeeds with HTTP 200.
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Translate the placeholder lease id / ETag into real values for this path.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Every supplied combination should be satisfied.
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Resolve a valid lease id but pair it with a failing access condition:
    // If-None-Match set to the file's CURRENT ETag always trips a 412.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    // Point the client at a path that was never created; the service must
    // surface BlobNotFound through a DataLakeStorageException.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageError = assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageError.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Setting null headers is an accepted no-op update: the service still
    // responds 200 with the standard response headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(response -> {
            validateBasicHeaders(response.getHeaders());
            assertEquals(200, response.getStatusCode());
        })
        .verifyComplete();
}
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    // Change only the content type, carrying every other header value over from
    // the file's current properties so nothing else is disturbed.
    PathProperties current = fc.getProperties().block();
    byte[] contentMd5 = Base64.getEncoder()
        .encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    PathHttpHeaders updated = new PathHttpHeaders()
        .setContentEncoding(current.getContentEncoding())
        .setContentDisposition(current.getContentDisposition())
        .setContentType("type")
        .setCacheControl(current.getCacheControl())
        .setContentLanguage(current.getContentLanguage())
        .setContentMd5(contentMd5);
    fc.setHttpHeaders(updated).block();
    // The new content type must be visible on a subsequent properties fetch.
    StepVerifier.create(fc.getProperties())
        .assertNext(p -> assertEquals("type", p.getContentType()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
    // FIX: append/flush return cold Monos; without block() they were never
    // subscribed, so the file content was never actually uploaded before the
    // headers were set (every other test in this suite blocks here).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Apply the parameterized header set (all-null and all-populated rows).
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    // Every header must round-trip exactly.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Two rows: clear every header (all nulls) and set every header to a distinct value.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", md5, "type"));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Resolve real lease id / ETag values, then issue a no-op header update
    // guarded by the (satisfiable) conditions.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Lease the path but send the unresolved lease id, and set If-None-Match to
    // the current ETag — each supplied row violates at least one condition.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
    // Setting headers on a path that was never created must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single key/value pair set on the file must round-trip via getProperties.
    Map<String, String> expected = Collections.singletonMap("foo", "bar");
    fc.setMetadata(expected).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(expected, properties.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    // Assemble the metadata map from whichever key/value pairs this row supplies
    // (the all-null row exercises setting an empty map).
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    // The service must report back exactly the metadata that was set.
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(metadata, properties.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Resolve the real lease id and ETag, then set (null) metadata guarded by
    // the satisfiable conditions.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Acquire a lease but pass the unresolved id; If-None-Match is the current
    // ETag, so every supplied row violates at least one condition.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, conditions))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
    // Setting metadata on a never-created path must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readAllNull() {
// Upload the default payload, then read it back with no range/conditions and
// verify both the returned bytes and the absence/presence of every response
// header a fresh, un-leased, never-copied file should exhibit.
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
// No metadata was set, so no x-ms-meta-* headers should appear.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
// No range was requested and no optional content headers were ever set.
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
// The file was never a copy destination.
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
// No lease has been taken on the file.
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
// Finally collect the body for the byte-level comparison below.
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
}
@Test
public void readEmptyFile() {
    // Reading a freshly created, zero-length file completes with an empty buffer.
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(buffer -> assertEquals(0, buffer.array().length))
        .verifyComplete();
}
// FIX: the original declared @Test twice; @Test is not a repeatable annotation,
// so the duplicate is a compile error.
@Test
public void readMin() {
    // Upload the default payload and verify a plain read returns it verbatim.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    // A null count means "from offset to the end of the file".
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // FIX: removed an unused ByteArrayOutputStream local ('readData') that was
    // never written to or read.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Ranges: whole payload, a prefix, and an interior slice of the default text.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 3 + 2)));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Resolve the real lease id and ETag, then read guarded by the conditions.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, conditions, false))
        .expectNextMatches(response -> response.getStatusCode() == 200)
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Lease the path but send the unresolved id; If-None-Match is the current
    // ETag, so every supplied row violates at least one condition.
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, conditions, false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readMd5() throws NoSuchAlgorithmException {
// Requesting MD5 on a ranged read (last arg true) must yield a Content-MD5
// header equal to the MD5 of exactly the requested 3-byte slice.
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
// Compare against a locally computed, base64-encoded MD5 of the same slice.
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
// Checked exceptions cannot escape the lambda; rethrow unchecked.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void readRetryDefault() {
    // Upload the payload, then read it through a client whose pipeline injects
    // five transient failures; the default retry policy must still deliver the
    // full, correct content.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // FIX: removed an unused ByteArrayOutputStream local ('downloadData') that
    // was never written to or read.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    // readToFile without overwrite must refuse to clobber an existing file.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // FIX: append/flush return cold Monos; without block() they were never
    // subscribed and no content was uploaded (every other test here blocks).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
public void downloadFileExistsSucceeds() throws IOException {
    // With overwrite=true, readToFile must replace an existing local file.
    File target = new File(prefix + ".txt");
    target.deleteOnExit();
    createdFiles.add(target);
    if (!target.exists()) {
        assertTrue(target.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(target.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    // The local file now holds exactly the uploaded payload.
    String written = new String(Files.readAllBytes(target.toPath()), StandardCharsets.UTF_8);
    assertEquals(DATA.getDefaultText(), written);
}
@Test
public void downloadFileDoesNotExist() throws IOException {
    // readToFile must create the destination when it does not exist yet.
    File target = new File(prefix + ".txt");
    target.deleteOnExit();
    createdFiles.add(target);
    if (target.exists()) {
        assertTrue(target.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(target.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    // The newly created file holds exactly the uploaded payload.
    String written = new String(Files.readAllBytes(target.toPath()), StandardCharsets.UTF_8);
    assertEquals(DATA.getDefaultText(), written);
}
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    // FIX: this test's contract is that the destination does NOT exist (the
    // CREATE open option is what creates it), but the setup was creating the
    // file — the same setup as downloadFileExistOpenOptions. Delete it instead,
    // mirroring downloadFileDoesNotExist above.
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // CREATE makes the missing file; READ+WRITE open it for the transfer.
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void downloadFileExistOpenOptions() throws IOException {
    // Destination already exists; TRUNCATE_EXISTING lets the download replace it.
    File target = new File(prefix + ".txt");
    target.deleteOnExit();
    createdFiles.add(target);
    if (!target.exists()) {
        assertTrue(target.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(target.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(response -> {
            // Verify on-disk content inside the reactive assertion; checked
            // IOExceptions cannot escape the lambda, so rethrow unchecked.
            try {
                String written = new String(Files.readAllBytes(target.toPath()), StandardCharsets.UTF_8);
                assertEquals(DATA.getDefaultText(), written);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Download sizes: tiny, several multi-MB sizes (the 8 * 1026 * 1024 + 10 value
// is deliberately not a power-of-two multiple, matching the other size-based
// tests in this file), and a 50 MB payload.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    // Upload a default-sized random file, download only the given range, and
    // compare the slice against the original file.
    File source = getRandomFile(DATA.getDefaultDataSize());
    source.deleteOnExit();
    createdFiles.add(source);
    fc.uploadFromFile(source.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
        null, null, false, null))
        .assertNext(response -> compareFiles(source, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}
// Ranges: full payload, payload minus the first byte, a 2-byte interior slice,
// payload minus the last byte, and a count larger than the file (clamped by the
// service).
private static Stream<FileRange> downloadFileRangeSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, size),
        new FileRange(1, size - 1),
        new FileRange(3, 2L),
        new FileRange(0, size - 1),
        new FileRange(0, 10 * 1024L));
}
@Test
public void downloadFileRangeFail() {
    // A range starting past the end of the file must be rejected by the service.
    File source = getRandomFile(DATA.getDefaultDataSize());
    source.deleteOnExit();
    createdFiles.add(source);
    fc.uploadFromFile(source.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    FileRange pastEnd = new FileRange(DATA.getDefaultDataSizeLong() + 1);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
        pastEnd, null, null, null, false, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void downloadFileCountNull() {
    // A FileRange with offset 0 and no count must download the whole file.
    File source = getRandomFile(DATA.getDefaultDataSize());
    source.deleteOnExit();
    createdFiles.add(source);
    fc.uploadFromFile(source.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(response -> compareFiles(source, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Upload a random file to the test path, then download it guarded by a set
    // of satisfiable access conditions; the call must not throw.
    File source = getRandomFile(DATA.getDefaultDataSize());
    source.deleteOnExit();
    createdFiles.add(source);
    fc.uploadFromFile(source.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    // Resolve the current ETag first, then the lease id, matching the original
    // helper-invocation order.
    String resolvedEtag = setupPathMatchCondition(fc, match);
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setLeaseId(resolvedLease);
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, conditions, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Upload a random file, then attempt a download guarded by conditions that
// each parameter row deliberately violates; expect ConditionNotMet or a
// lease-mismatch error.
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
// Acquire a real lease but send the unresolved id; If-None-Match is set to
// the current ETag so that condition can never pass either.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
// Either failure mode is acceptable depending on which condition trips first.
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver API: records every reported
// byte count so tests can assert on the progress sequence.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Every progress value reported, in order; read directly by the tests.
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every reported progress value so
// tests can assert on the sequence.
private static final class MockProgressListener implements ProgressListener {
// Every progress value reported, in order; read directly by the tests.
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
@Test
public void renameMin() {
    // Rename within the same file system (null destination FS) returns 201.
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, generatePathName(), null, null, null), 201);
}
@Test
public void renameWithResponse() {
    // After a rename, the destination client must be usable (properties -> 200)...
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null)
        .flatMap(renamed -> renamed.getValue().getPropertiesWithResponse(null)))
        .expectNextMatches(propsResponse -> propsResponse.getStatusCode() == 200)
        .verifyComplete();
    // ...and the original path must no longer exist.
    StepVerifier.create(fc.getProperties())
        .verifyError(DataLakeStorageException.class);
}
@Test
public void renameFilesystemWithResponse() {
    // Rename across file systems: create a second file system and move the file
    // into it, then verify the destination works and the source is gone.
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        // FIX: assertEquals arguments were reversed (actual, expected); the
        // expected value comes first, consistent with the rest of this file.
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
@Test
public void renameError() {
    // Renaming a path that was never created must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    // Source/destination names may carry percent-encoded sequences; create the
    // source, rename it, and verify the destination is reachable.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
        .flatMap(renamed -> {
            assertEquals(201, renamed.getStatusCode());
            return renamed.getValue().getPropertiesWithResponse(null);
        }))
        .expectNextMatches(propsResponse -> propsResponse.getStatusCode() == 200)
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Satisfiable conditions applied to the SOURCE path: rename must succeed.
    String resolvedLease = setupPathLeaseCondition(fc, leaseID);
    String resolvedEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions sourceConditions = new DataLakeRequestConditions()
        .setLeaseId(resolvedLease)
        .setIfMatch(resolvedEtag)
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, generatePathName(), sourceConditions, null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
    // Fresh source file; lease it but send the unresolved id, and set
    // If-None-Match to the current ETag so the source conditions must fail.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    setupPathLeaseCondition(fc, leaseID);
    String currentEtag = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions sourceConditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(currentEtag)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), sourceConditions, null, null))
        .verifyError(DataLakeStorageException.class);
}
    @ParameterizedTest
    @MethodSource("modifiedMatchAndLeaseIdSupplier")
    public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        // Rename onto an existing destination succeeds when the destination's access
        // conditions (lease, ETag, dates) are all satisfied.
        String pathName = generatePathName();
        DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
        DataLakeRequestConditions drc = new DataLakeRequestConditions()
            .setLeaseId(setupPathLeaseCondition(destFile, leaseID))
            .setIfMatch(setupPathMatchCondition(destFile, match))
            .setIfNoneMatch(noneMatch)
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
            drc, null), 201);
    }

    @ParameterizedTest
    @MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
    public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        // Rename fails when any destination access condition is violated.
        String pathName = generatePathName();
        DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
        setupPathLeaseCondition(destFile, leaseID);
        DataLakeRequestConditions drc = new DataLakeRequestConditions()
            .setLeaseId(leaseID)
            .setIfMatch(match)
            .setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
            .verifyError(DataLakeStorageException.class);
    }
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
    @Test
    public void appendDataMin() {
        // Minimal happy-path append; only checks that the call does not throw.
        // NOTE(review): body is identical to appendBinaryDataMin below — confirm whether this
        // variant was meant to exercise a different overload (e.g. Flux/InputStream).
        assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
    }

    @Test
    public void appendData() {
        // Append should return 202 plus the standard request/version/date headers and report
        // server-side encryption.
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
            .assertNext(r -> {
                HttpHeaders headers = r.getHeaders();
                assertEquals(202, r.getStatusCode());
                assertNotNull(headers.getValue(X_MS_REQUEST_ID));
                assertNotNull(headers.getValue(X_MS_VERSION));
                assertNotNull(headers.getValue(HttpHeaderName.DATE));
                assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
            })
            .verifyComplete();
    }
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
    @ParameterizedTest
    @MethodSource("appendDataIllegalArgumentsSupplier")
    public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
        // A null stream or a declared length that disagrees with the data must be rejected.
        StepVerifier.create(fc.append(is, 0, dataSize))
            .verifyError(exceptionType);
    }

    private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
        return Stream.of(
            // null body -> NPE; length off by one in either direction -> UnexpectedLengthException
            Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
            Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
            Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
        );
    }
    @Test
    public void appendDataEmptyBody() {
        // The service rejects a zero-length append.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
            .verifyError(DataLakeStorageException.class);
    }

    @Test
    public void appendDataNullBody() {
        // A null body fails client-side before any request is sent.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.append(null, 0, 0))
            .verifyError(NullPointerException.class);
    }

    @Test
    public void appendDataLease() {
        // Append succeeds when the correct active lease id is supplied.
        assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
            null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
    }

    @Test
    public void appendDataLeaseFail() {
        // A wrong lease id on a leased path yields 412 Precondition Failed.
        setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
            .verifyErrorSatisfies(r -> {
                DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
                assertEquals(412, e.getResponse().getStatusCode());
            });
    }

    // Guard for tests that rely on features introduced in the 2020-08-04 service version
    // (e.g. lease actions on append).
    private static boolean olderThan20200804ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_08_04);
    }
    @DisabledIf("olderThan20200804ServiceVersion")
    @Test
    public void appendDataLeaseAcquire() {
        // LeaseAction.ACQUIRE should take out a fixed-duration lease as part of the append.
        fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
        DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
            .setLeaseAction(LeaseAction.ACQUIRE)
            .setProposedLeaseId(CoreUtils.randomUuid().toString())
            .setLeaseDuration(15);

        assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
            202);

        // The path must now report a locked, fixed-duration lease.
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> {
                assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
                assertEquals(LeaseStateType.LEASED, r.getLeaseState());
                assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
            })
            .verifyComplete();
    }

    @DisabledIf("olderThan20200804ServiceVersion")
    @Test
    public void appendDataLeaseAutoRenew() {
        // LeaseAction.AUTO_RENEW with a valid lease id keeps the existing lease alive.
        fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
        String leaseId = CoreUtils.randomUuid().toString();
        DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
        leaseClient.acquireLease(15).block();
        DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
            .setLeaseAction(LeaseAction.AUTO_RENEW)
            .setLeaseId(leaseId);

        assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
            202);

        StepVerifier.create(fc.getProperties())
            .assertNext(r -> {
                assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
                assertEquals(LeaseStateType.LEASED, r.getLeaseState());
                assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
            })
            .verifyComplete();
    }
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
    @DisabledIf("olderThan20200804ServiceVersion")
    @Test
    public void appendDataLeaseAcquireRelease() {
        // ACQUIRE_RELEASE takes a lease for the duration of the operation and releases it when
        // the append (with flush) completes, so the path ends up unleased.
        fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
        DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
            .setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
            .setProposedLeaseId(CoreUtils.randomUuid().toString())
            .setLeaseDuration(15)
            .setFlush(true);

        assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
            202);

        StepVerifier.create(fc.getProperties())
            .assertNext(r -> {
                assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
                assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
            })
            .verifyComplete();
    }
    @Test
    public void appendDataError() {
        // Appending to a path that does not exist yields 404.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
            .verifyErrorSatisfies(r -> {
                DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
                assertEquals(404, e.getResponse().getStatusCode());
            });
    }

    @Test
    public void appendDataRetryOnTransientFailure() {
        // A pipeline policy injects a transient failure; the append must be retried
        // transparently and the full payload still land in the file.
        DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
            new TransientFailureInjectingHttpPipelinePolicy());

        clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
        fc.flush(DATA.getDefaultDataSizeLong(), true).block();

        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
            .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
            .verifyComplete();
    }

    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void appendDataFlush() {
        // setFlush(true) commits the data as part of the append, so the content is readable
        // without a separate flush call.
        DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
            .assertNext(r -> {
                HttpHeaders headers = r.getHeaders();
                assertEquals(202, r.getStatusCode());
                assertNotNull(headers.getValue(X_MS_REQUEST_ID));
                assertNotNull(headers.getValue(X_MS_VERSION));
                assertNotNull(headers.getValue(HttpHeaderName.DATE));
                assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
            })
            .verifyComplete();

        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
            .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
            .verifyComplete();
    }
    @Test
    public void appendBinaryDataMin() {
        // Minimal happy-path append of BinaryData; only checks the call does not throw.
        assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
    }

    @Test
    public void appendBinaryData() {
        // BinaryData append returns 202 with the standard headers and encryption flag.
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
            .assertNext(r -> {
                HttpHeaders headers = r.getHeaders();
                assertEquals(202, r.getStatusCode());
                assertNotNull(headers.getValue(X_MS_REQUEST_ID));
                assertNotNull(headers.getValue(X_MS_VERSION));
                assertNotNull(headers.getValue(HttpHeaderName.DATE));
                assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
            })
            .verifyComplete();
    }

    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void appendBinaryDataFlush() {
        // BinaryData append with setFlush(true) commits in the same call.
        DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
        StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
            .assertNext(r -> {
                HttpHeaders headers = r.getHeaders();
                assertEquals(202, r.getStatusCode());
                assertNotNull(headers.getValue(X_MS_REQUEST_ID));
                assertNotNull(headers.getValue(X_MS_VERSION));
                assertNotNull(headers.getValue(HttpHeaderName.DATE));
                assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
            })
            .verifyComplete();
    }
    @Test
    public void flushDataMin() {
        // Append then flush (overwrite) should not throw.
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    }

    @Test
    public void flushClose() {
        // Flush with close=true should succeed on freshly appended data.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fc.create().block();
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
            true, null, null).block());
    }

    @Test
    public void flushRetainUncommittedData() {
        // Flush with retainUncommittedData=true should succeed.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fc.create().block();
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
            false, null, null).block());
    }

    @Test
    public void flushIA() {
        // Flushing at a position that does not match the appended length is rejected.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fc.create().block();
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        StepVerifier.create(fc.flushWithResponse(4, false, false, null,
            null))
            .verifyError(DataLakeStorageException.class);
    }
    @ParameterizedTest
    @CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
    public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
        String contentLanguage, String contentType) {
        // HTTP headers supplied on flush must be visible on subsequent getProperties.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
            .setContentDisposition(contentDisposition)
            .setContentEncoding(contentEncoding)
            .setContentLanguage(contentLanguage)
            .setContentType(contentType);

        fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();

        // The service defaults an unset content type to application/octet-stream.
        contentType = (contentType == null) ? "application/octet-stream" : contentType;
        String finalContentType = contentType;

        StepVerifier.create(fc.getPropertiesWithResponse(null))
            .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType))
            .verifyComplete();
    }

    @ParameterizedTest
    @MethodSource("modifiedMatchAndLeaseIdSupplier")
    public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        // Flush succeeds when every access condition (lease, ETag, dates) is satisfied.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        fc.append(DATA.getDefaultBinaryData(), 0).block();

        DataLakeRequestConditions drc = new DataLakeRequestConditions()
            .setLeaseId(setupPathLeaseCondition(fc, leaseID))
            .setIfMatch(setupPathMatchCondition(fc, match))
            .setIfNoneMatch(noneMatch)
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);

        assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
            false, null, drc), 200);
    }
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSize(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
    @Test
    public void flushError() {
        // Flushing a path that was never created fails.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.flush(1, true))
            .verifyError(DataLakeStorageException.class);
    }

    @Test
    public void flushDataOverwrite() {
        // First flush with overwrite=true succeeds; a second flush with overwrite=false over
        // already-committed data must fail.
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
        fc.append(DATA.getDefaultBinaryData(), 0).block();
        StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
            .verifyError(DataLakeStorageException.class);
    }
    @ParameterizedTest
    @CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
        "%E6%96%91%E9%BB%9E,斑點"})
    public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
        // The client should decode percent-encoded path names and report the decoded path.
        DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
        assertEquals(finalFileName, client.getFilePath());
    }

    @Test
    public void builderBearerTokenValidation() {
        // Token credentials require HTTPS; building a client against an http endpoint must throw.
        String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();

        assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint(endpoint)
            .buildFileAsyncClient());
    }
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("uploadFromFileSupplier")
    public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
        // Round-trip: upload a random local file, download it back, and compare byte-for-byte.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        File file = getRandomFile(fileSize);
        file.deleteOnExit();
        createdFiles.add(file);

        StepVerifier.create(fac.uploadFromFile(file.getPath(),
            new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
            .verifyComplete();

        File outFile = new File(file.getPath() + "result");
        assertTrue(outFile.createNewFile());
        outFile.deleteOnExit();
        createdFiles.add(outFile);

        StepVerifier.create(fac.readToFile(outFile.getPath(), true))
            .expectNextCount(1)
            .verifyComplete();

        compareFiles(file, outFile, 0, fileSize);
    }

    private static Stream<Arguments> uploadFromFileSupplier() {
        return Stream.of(
            // Sizes chosen to cover single-shot, multi-block default, and explicit block-size paths.
            Arguments.of(10, null),
            Arguments.of(10 * Constants.KB, null),
            Arguments.of(50 * Constants.MB, null),
            Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
        );
    }
    @Test
    public void uploadFromFileWithMetadata() {
        // Metadata supplied at upload time must appear on getProperties, and the content must
        // round-trip unchanged.
        Map<String, String> metadata = Collections.singletonMap("metadata", "value");
        File file = getRandomFile(Constants.KB);
        file.deleteOnExit();
        createdFiles.add(file);

        fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();

        StepVerifier.create(fc.getProperties())
            .assertNext(r -> assertEquals(metadata, r.getMetadata()))
            .verifyComplete();

        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
            .assertNext(r -> {
                try {
                    TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
                } catch (IOException e) {
                    // Rethrow unchecked so the StepVerifier surface sees the failure.
                    throw new RuntimeException(e);
                }
            })
            .verifyComplete();
    }
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
    /*
     * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
     * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
     * read size.
     */
    @SuppressWarnings("deprecation")
    private static final class FileUploadReporter implements ProgressReceiver {
        // Last cumulative byte count reported; overwritten on every callback.
        private long reportedByteCount;

        @Override
        public void reportProgress(long bytesTransferred) {
            this.reportedByteCount = bytesTransferred;
        }

        long getReportedByteCount() {
            return this.reportedByteCount;
        }
    }
    // Non-deprecated counterpart of FileUploadReporter: records the latest cumulative byte
    // count delivered through the ProgressListener callback.
    private static final class FileUploadListener implements ProgressListener {
        private long reportedByteCount;

        @Override
        public void handleProgress(long bytesTransferred) {
            this.reportedByteCount = bytesTransferred;
        }

        long getReportedByteCount() {
            return this.reportedByteCount;
        }
    }
    @SuppressWarnings("deprecation")
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("uploadFromFileWithProgressSupplier")
    public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
        // The deprecated ProgressReceiver must be fed the full cumulative byte count.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
        File file = getRandomFile(size);
        file.deleteOnExit();
        createdFiles.add(file);

        // maxSingleUploadSize below blockSize forces the chunked (multi-part) upload path.
        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
            .setMaxConcurrency(bufferCount)
            .setProgressReceiver(uploadReporter)
            .setMaxSingleUploadSizeLong(blockSize - 1);

        StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
            null, null))
            .verifyComplete();

        assertEquals(size, uploadReporter.getReportedByteCount());
    }

    private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
        return Stream.of(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
        );
    }

    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("uploadFromFileWithProgressSupplier")
    public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
        // Same as uploadFromFileReporter but via the non-deprecated ProgressListener API.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
        File file = getRandomFile(size);
        file.deleteOnExit();
        createdFiles.add(file);

        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
            .setMaxConcurrency(bufferCount)
            .setProgressListener(uploadListener)
            .setMaxSingleUploadSizeLong(blockSize - 1);

        StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
            null, null))
            .verifyComplete();

        assertEquals(size, uploadListener.getReportedByteCount());
    }
    @ParameterizedTest
    @MethodSource("uploadFromFileOptionsSupplier")
    public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
        // Uploads sized above the single-shot threshold must still land the full file size,
        // regardless of whether an explicit block size is supplied.
        File file = getRandomFile(dataSize);
        file.deleteOnExit();
        createdFiles.add(file);

        fc.uploadFromFile(file.toPath().toString(),
            new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
            null, null, null).block();

        StepVerifier.create(fc.getProperties())
            .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
            .verifyComplete();
    }

    private static Stream<Arguments> uploadFromFileOptionsSupplier() {
        return Stream.of(
            // dataSize > maxSingleUploadSize, with default and explicit block sizes.
            Arguments.of(100, 50L, null),
            Arguments.of(100, 50L, 20L)
        );
    }

    @ParameterizedTest
    @MethodSource("uploadFromFileOptionsSupplier")
    public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
        // WithResponse variant should expose status 200 plus ETag/Last-Modified on success.
        File file = getRandomFile(dataSize);
        file.deleteOnExit();
        createdFiles.add(file);

        StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
            new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
            null, null, null))
            .assertNext(r -> {
                assertEquals(200, r.getStatusCode());
                assertNotNull(r.getValue().getETag());
                assertNotNull(r.getValue().getLastModified());
            })
            .verifyComplete();

        StepVerifier.create(fc.getProperties())
            .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
            .verifyComplete();
    }
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @Test
    public void asyncBufferedUploadEmpty() {
        // A buffered upload of a single empty buffer is rejected by the service.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
            .verifyError(DataLakeStorageException.class);
    }
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
    public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
        byte[] expectedDownload) {
        // Empty buffers interleaved in the source flux must be skipped without corrupting
        // the concatenated upload.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
            null, true))
            .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
            .verifyComplete();

        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
            .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
            .verifyComplete();
    }

    private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
        ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
        byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
        byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);

        return Stream.of(
            // Cases: no empty buffer, empty at the end, middle, and start.
            Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
            Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
            Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
            Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
        );
    }
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("asyncBufferedUploadSupplier")
    public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
        // Buffered upload with various block sizes / concurrency must reproduce the input
        // bytes exactly on read-back.
        DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
            .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
            .createFile(generatePathName()).blockOptional()
            .orElseThrow(() -> new RuntimeException("File was not created"));
        DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());

        byte[] data = getRandomByteArray(dataSize);
        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
            .setBlockSizeLong(bufferSize)
            .setMaxConcurrency(numBuffs)
            .setMaxSingleUploadSizeLong(4L * Constants.MB);
        facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();

        // Skip content verification for very large payloads to keep the test fast.
        if (dataSize < 100 * 1024 * 1024) {
            StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
                .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
                .verifyComplete();
        }
    }

    private static Stream<Arguments> asyncBufferedUploadSupplier() {
        return Stream.of(
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
            Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
            Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
            Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
        );
    }
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
result.position(result.position() + buffer.remaining());
}
assertEquals(0, result.remaining());
}
    @SuppressWarnings("deprecation")
    // Counts progress callbacks, asserting each reported total is block-size aligned.
    private static final class Reporter implements ProgressReceiver {
        private final long blockSize;
        private long reportingCount;

        Reporter(long blockSize) {
            this.blockSize = blockSize;
        }

        @Override
        public void reportProgress(long bytesTransferred) {
            assert bytesTransferred % blockSize == 0;
            this.reportingCount += 1;
        }
    }

    // Non-deprecated counterpart of Reporter, using the ProgressListener API.
    private static final class Listener implements ProgressListener {
        private final long blockSize;
        private long reportingCount;

        Listener(long blockSize) {
            this.blockSize = blockSize;
        }

        @Override
        public void handleProgress(long bytesTransferred) {
            assert bytesTransferred % blockSize == 0;
            this.reportingCount += 1;
        }
    }
    @SuppressWarnings("deprecation")
    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("bufferedUploadWithProgressSupplier")
    public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
        // Buffered upload must invoke the deprecated ProgressReceiver at least once per block.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
            .setMaxConcurrency(bufferCount)
            .setProgressReceiver(uploadReporter)
            .setMaxSingleUploadSizeLong(4L * Constants.MB);

        StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
            null, null))
            .assertNext(response -> {
                assertEquals(200, response.getStatusCode());
                assertTrue(uploadReporter.reportingCount >= (size / blockSize));
            })
            .verifyComplete();
    }

    private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
        return Stream.of(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
        );
    }

    // NOTE(review): the @EnabledIf condition below appears truncated in this copy of the
    // source — confirm the full condition string against the repository.
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("bufferedUploadWithProgressSupplier")
    public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
        // Same as bufferedUploadWithReporter but via the non-deprecated ProgressListener API.
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
        ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
            .setMaxConcurrency(bufferCount)
            .setProgressListener(uploadListener)
            .setMaxSingleUploadSizeLong(4L * Constants.MB);

        StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
            null, null))
            .assertNext(response -> {
                assertEquals(200, response.getStatusCode());
                assertTrue(uploadListener.reportingCount >= (size / blockSize));
            })
            .verifyComplete();
    }
// Uploads a source made of several ByteBuffer chunks through the buffered path,
// then reads the file back and verifies the bytes match the concatenated chunks.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    // Write client comes from a service client tuned for the given buffer size.
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Data table: chunk sizes (MB), buffer size (MB), number of buffers.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Arguments[] cases = {
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    };
    return Stream.of(cases);
}
// Round-trips a multi-chunk payload through upload/read and verifies the
// buffered-upload path picks the right code path for each chunk size mix.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Same round-trip as bufferedUploadHandlePathing, but the source is a hot Flux
// (publish().autoConnect()), exercising the non-replayable-source path.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size mixes straddling the 4 MB single-shot upload threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
    return cases.stream();
}
// Hot-Flux buffered upload through a pipeline that injects transient failures;
// retries must still produce a byte-identical file when read back.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // Read back through a clean (non-failing) client.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size mixes for the transient-failure hot-Flux upload test.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.stream();
}
// InputStream-based upload with injected transient failures; verifies retries
// leave the file byte-identical to the original data.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must surface as NullPointerException through the reactive chain.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// Verifies HTTP headers set at upload time are persisted on the path. When
// validateContentMD5 is true, the MD5 of the payload is sent and checked back;
// a null contentType must default to application/octet-stream.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// Data table: size, cacheControl, contentDisposition, contentEncoding,
// contentLanguage, validateContentMD5, contentType.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    Arguments[] cases = {
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    };
    return Stream.of(cases);
}
// Uploads with user metadata (including the no-metadata case when keys are null)
// and verifies the stored metadata equals what was sent.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// Counts the appendWithResponse calls made during an upload (via a subclass
// override) to verify the chunking decision: single-shot vs. numAppends blocks.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Hand-rolled spy: increments the counter, then delegates to the real append.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
// Upload with POSIX permissions ("0777") and umask ("0057") set on the options;
// only checks the upload succeeds and the file size is correct.
@Test
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
// Upload must succeed when every access condition (lease, ETag match/none-match,
// modified-since bounds) is satisfiable for the target file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// Upload must fail with HTTP 412 (precondition failed) when any access
// condition is deliberately unsatisfiable.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
// Upload under a garbage lease id must fail; exercises buffer-pool cleanup when
// the multi-buffer upload aborts mid-flight.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
// By default upload does not overwrite: the second upload to the same path
// must fail with IllegalArgumentException.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
// uploadFromFile with overwrite=true must succeed over an existing file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
    // Track the second temp file for cleanup as well; the previous version
    // inlined getRandomFile(50) here and leaked it on disk after the run.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
// Uploads from a non-replayable file-channel Flux and downloads to a second
// file, then compares contents.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    // NOTE(review): the AsynchronousFileChannel is never explicitly closed here;
    // presumably acceptable for a short-lived test process — confirm FluxUtil.readFile ownership.
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
// Upload from an InputStream without a declared length must still round-trip
// the default payload intact.
@Test
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Declaring a length that disagrees with the stream's actual size must fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid lengths: zero, negative, and one byte off either side of the true size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
// Upload through a transient-failure-injecting pipeline must retry to success
// and leave the payload intact.
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Upload from BinaryData round-trips the default payload.
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// BinaryData upload with overwrite=true succeeds over the existing file.
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Encryption context supplied at upload time must be returned by getProperties.
// Requires service version 2021-04-10 or newer.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
/**
 * Builds a CSV payload using the serialization's column/record separators —
 * an optional header row followed by {@code numCopies} copies of two fixed
 * data rows — and uploads it to {@code fc} via create/append/flush.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String colSep = Character.toString(s.getColumnSeparator());
    char rowSep = s.getRecordSeparator();
    byte[] headerBytes = (String.join(colSep, "rn1", "rn2", "rn3", "rn4") + rowSep).getBytes();
    String firstRow = String.join(colSep, "100", "200", "300", "400");
    String secondRow = String.join(colSep, "300", "400", "500", "600");
    byte[] rowBytes = (firstRow + rowSep + secondRow + rowSep).getBytes();
    int headerLength = s.isHeadersPresent() ? headerBytes.length : 0;
    byte[] payload = new byte[headerLength + rowBytes.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headerBytes, 0, payload, 0, headerBytes.length);
    }
    for (int copy = 0; copy < numCopies; copy++) {
        System.arraycopy(rowBytes, 0, payload, headerLength + copy * rowBytes.length, rowBytes.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(payload), 0).block();
    fc.flush(payload.length, true).block();
}
/**
 * Builds a small JSON object with {@code numCopies} {@code "nameN": "ownerN"}
 * entries (trailing comma included, matching the query tests' expectations)
 * and uploads it to {@code fc}.
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int entry = 0; entry < numCopies; entry++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", entry, entry));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    // ASCII-only content, so char count equals byte count for the flush length.
    fc.flush(json.length(), true).block();
}
// Minimal SELECT * query over CSV content of varying sizes; the queried bytes
// must equal the raw file content.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    1,
    32,
    256,
    400,
    4000
})
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate the query result buffers into one stream.
        // NOTE(review): piece.array() assumes array-backed buffers — confirm the query Flux guarantees that.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Queries with different record/column separators and header configurations.
// When the input has headers but the output omits them, the result must equal
// the source minus the 16-byte header row.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Data table: recordSeparator, columnSeparator, headersPresentIn, headersPresentOut.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    return Stream.of(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    );
}
// CSV query with escape char and field quote configured; output must equal input.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// JSON-in/JSON-out query: result must equal the raw file content plus the
// trailing record separator (the 10 byte appended below).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = new ByteArrayOutputStream();
    FluxUtil.writeToOutputStream(fc.read(), readData).block();
    // The service appends the record separator ('\n' == 10) to the last record.
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Data table: number of JSON entries, paired with a newline record separator.
private static Stream<Arguments> queryInputJsonSupplier() {
    Arguments[] cases = {
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    };
    return Stream.of(cases);
}
// CSV input transformed to JSON output; verifies the single row maps to the
// expected positional-column JSON object.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // Only the prefix is compared; the result may carry trailing separators.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
// JSON input transformed to CSV output; two entries become one comma-separated row.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
// CSV input with Arrow output serialization; only checks the query does not throw
// (Arrow output content is not validated here).
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
    FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(inSer, 32);
    List<FileQueryArrowField> schema = Collections.singletonList(
        new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
    FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
// A column-ordinal mismatch is a non-fatal error: the query completes but the
// error consumer must have received at least one InvalidColumnOrdinal error.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    // Upload with '.' separator, then query with ',' to provoke the mismatch.
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver2)).block().getValue().blockLast());
        assertTrue(receiver2.numErrors > 0);
    });
}
// Declaring JSON input over CSV content is a fatal error: consuming the result
// stream must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(true);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
            new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
            .assertNext(r -> {
                assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
            })
            .verifyComplete();
    });
}
// The progress consumer must eventually report the full file size once the
// query has scanned the whole blob.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
// With a large payload producing many progress callbacks, reported progress
// values must be monotonically non-decreasing.
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    String expression = "SELECT * from BlobStorage";
    uploadCsv(ser, 512000);
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        long temp = 0; // highest progress value seen so far
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        for (long progress : mockReceiver.progressList) {
            assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
            temp = progress;
        }
    });
}
// An unknown FileQuerySerialization subtype (input or output) must be rejected
// client-side with IllegalArgumentException.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
// Arrow is output-only: using it as input serialization must error.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
// True when the targeted service version predates 2020-10-02 (Parquet query support).
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Parquet is input-only: using it as output serialization must error.
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
// Querying a file that was never created must fail with DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Re-point fc at a path that does not exist on the service.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}
// Query must succeed when all access conditions are satisfiable.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
/**
 * Runs a test scenario, retrying it against the live service to absorb transient failures.
 * <p>
 * In playback/record mode the scenario is deterministic and runs exactly once. In live mode
 * the scenario is attempted up to 5 times with a 5-second pause between attempts.
 * <p>
 * Bug fix: the previous version swallowed the exception after the final failed attempt, so a
 * scenario that failed all 5 times still passed silently. The last failure is now rethrown.
 * Note that AssertionError (an Error, not an Exception) propagates immediately on the first
 * attempt, exactly as before.
 *
 * @param runnable the scenario to execute; expected to throw an unchecked exception on failure
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int attempt = 0; attempt < 5; attempt++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            // Runnable can only throw unchecked exceptions, so RuntimeException covers
            // everything the old "catch (Exception)" did here.
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    // All attempts failed; surface the failure instead of silently passing the test.
    throw lastFailure;
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Acquire a lease (its id is intentionally not used below) and build request conditions
    // that cannot all hold, so the service must reject the query.
    setupPathLeaseCondition(fc, leaseID);
    String failingNoneMatch = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(failingNoneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    FileQueryOptions options = new FileQueryOptions("SELECT * from BlobStorage", new ByteArrayOutputStream())
        .setRequestConditions(conditions);
    StepVerifier.create(fc.queryWithResponse(options))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    // Create a fresh file, apply the deletion schedule, and check whether an expiry
    // time was recorded on its properties.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    client.create().block();
    client.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(hasExpiry, client.getProperties().block().getExpiresOn() != null);
}
// Argument pairs: (deletion options, whether an expiry time is expected on the file).
private static Stream<Arguments> scheduleDeletionSupplier() {
    FileScheduleDeletionOptions relativeToCreation =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME);
    FileScheduleDeletionOptions relativeToNow =
        new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW);
    return Stream.of(
        Arguments.of(relativeToCreation, true),
        Arguments.of(relativeToNow, true),
        Arguments.of(new FileScheduleDeletionOptions(), false),
        Arguments.of(null, false));
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
    // Schedule deletion at an absolute time and verify it round-trips through the
    // service (the service stores expiry at second precision).
    OffsetDateTime expiry = testResourceNamer.now().plusDays(1);
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    client.create().block();
    client.scheduleDeletionWithResponse(new FileScheduleDeletionOptions(expiry)).block();
    assertEquals(expiry.truncatedTo(ChronoUnit.SECONDS), client.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
    // Scheduling deletion on a file that was never created should produce a service error.
    FileScheduleDeletionOptions options =
        new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(client.scheduleDeletionWithResponse(options))
        .verifyError(DataLakeStorageException.class);
}
// Test double that records the bytes-scanned value of every query progress event it receives;
// tests inspect progressList afterwards.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Test double that asserts every received query error is non-fatal and has the expected
// name, while counting how many errors were delivered (tests read numErrors afterwards).
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
// A fatal error would indicate the query aborted; these tests expect recoverable errors only.
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
// A FileQuerySerialization implementation the client does not recognize; used by the
// serialization-validation tests above to provoke IllegalArgumentException.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
    // setup() already created fc, so uploading without requesting overwrite must be rejected.
    StepVerifier
        .create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
    // Overwrite the existing file, then read it back and compare the bytes.
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] downloaded = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded);
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of the
// file (unterminated string literal) — verify the full condition against source control.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
// Upload 20 MB with a 1 MB single-upload cutoff, which forces the chunked upload path;
// the test only asserts the upload completes without throwing.
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of the
// file (unterminated string literal) — verify the full condition against source control.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
// Verifies upload chunking: counts how many append calls an upload issues for a given
// data size / single-upload threshold / block size and compares against the expectation.
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
// Spy subclass: increments the counter on every append, then delegates to the real call.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
// The upload must emit exactly one response and complete.
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
// The resulting file reports the full data size...
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
// ...and the number of append calls matches the chunking expectation.
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: (dataSize, singleUploadSize, blockSize, expected number of append calls).
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    int justUnderHundredMb = (100 * Constants.MB) - 1;
    int justOverHundredMb = (100 * Constants.MB) + 1;
    // Past the single-upload threshold the expected append count is ceil(size / 4 MB),
    // mirroring the chunk size the upload path uses when none is specified.
    int chunkedAppends = (int) Math.ceil((double) justOverHundredMb / (double) (4 * Constants.MB));
    return Stream.of(
        Arguments.of(justUnderHundredMb, null, null, 1),
        Arguments.of(justOverHundredMb, null, null, chunkedAppends),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5));
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
    // The upload response value should carry a populated ETag.
    FileParallelUploadOptions options =
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    assertNotNull(fc.uploadWithResponse(options).block().getValue().getETag());
}
@Test
public void perCallPolicy() {
    // A per-call policy should pin the x-ms-version header on every request the client makes.
    DataLakeFileAsyncClient versionedClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    String propertiesVersion = versionedClient.getPropertiesWithResponse(null).block()
        .getHeaders().getValue(X_MS_VERSION);
    assertEquals("2019-02-02", propertiesVersion);
    String aclVersion = versionedClient.getAccessControlWithResponse(false, null).block()
        .getHeaders().getValue(X_MS_VERSION);
    assertEquals("2019-02-02", aclVersion);
}
} |
Same comment about subscribing in assertNext | public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
} | r.getValue().subscribe(piece -> { | public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Each schedule-deletion variant from the supplier should be accepted at creation time (201).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative time-to-expire of 6 days should surface as expiresOn = creationTime + 6 days.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
// Precision-tolerant compare: service timestamps are coarser than local clocks.
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Minimal delete: no request conditions, expect HTTP 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After a delete, getProperties must fail with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds when every supplied access condition matches the path's current state.
// setupPath{Lease,Match}Condition resolve the RECEIVED_* sentinels into real lease id / ETag values.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any supplied access condition does not match; noneMatch is resolved to the
// real ETag so the If-None-Match check is the one that trips.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file returns true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// Minimal deleteIfExists: expect HTTP 200 on an existing file.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists succeeds, getProperties must fail.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// A second deleteIfExists on the now-missing file reports 404 rather than throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds when all access conditions match (non-recursive path delete).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when an access condition does not match the path's current state.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns path info carrying a fresh ETag and last-modified time.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse with no conditions returns HTTP 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a path that was never created must error.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns path info carrying a fresh ETag and last-modified time.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse with no conditions returns HTTP 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a path that was never created must error.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-02-10
// (the version that introduced the recursive ACL APIs exercised below).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: counters report exactly one changed file, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file: counters report exactly one changed file, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal: parses a fixed list of default/user/group entries and verifies the
// operation touches exactly the one file with no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl populates ACL list, permissions, owner, and group.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// getAccessControlWithResponse (UPN disabled) returns HTTP 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControl with userPrincipalNameReturned=true still returns HTTP 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
// Skipped for the garbage lease id — presumably this endpoint does not reject that
// combination, so it cannot produce the expected failure. TODO confirm.
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Full sweep of default path properties on a freshly created file: basic headers present,
// lease unlocked/available, no copy metadata, HOT tier, no metadata, not a directory.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
// isIncrementalCopy may be null (header absent) — only fail if it is explicitly true.
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// Minimal getProperties: expect HTTP 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when an access condition does not match.
// NOTE(review): unlike the other *ACFail tests, the lease id here is taken from
// setupPathLeaseCondition(...) rather than the raw leaseID — confirm the asymmetry is intended.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// getProperties on a never-created path errors with a BlobNotFound message.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// setHttpHeaders(null) clears headers and still returns 200 with valid response headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Changes only the content type while copying every other header from the current properties,
// then verifies the new content type round-trips.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Verifies setHttpHeaders round-trips each header field supplied by setHTTPHeadersHeadersSupplier.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
// FIX: append/flush return cold Monos that never execute without a subscription; without
// block() the file content (and its MD5) was never written before validation.
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
fc.setHttpHeaders(putHeaders).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
contentMD5, contentType))
.verifyComplete();
}
// Argument sets for setHTTPHeadersHeaders: the all-null case and a fully populated header set
// whose MD5 is the Base64-encoded digest of the default test payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    Arguments allNull = Arguments.of(null, null, null, null, null, null);
    Arguments populated = Arguments.of("control", "disposition", "encoding", "language", md5, "type");
    return Stream.of(allNull, populated);
}
// setHttpHeaders succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a never-created path must error.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// A single metadata entry round-trips through setMetadata/getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Sets zero or two metadata pairs (null CSV cells are skipped) and verifies both the response
// status code and the round-tripped metadata map.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setMetadata on a never-created path must error.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// Reads the whole file with no options and checks the body plus the full set of response
// headers a default read should (and should not) carry.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
// NOTE(review): this fire-and-forget subscribe() runs the body assertion outside the
// StepVerifier pipeline, so a failure inside it may be swallowed; it also assumes the
// payload arrives as a single buffer — consider collecting the stream instead.
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
// Reading a freshly created empty file yields a single empty buffer.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// The mock policy forces retries on the byte range 2-6; after the retry budget (3) is exhausted
// the body stream surfaces an IOException to the consumer.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
}
// Minimal read: collected bytes equal the uploaded default payload.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Offset/count/expected-content triples for read-range tests.
// FIX: removed @ParameterizedTest/@MethodSource annotations that sat directly on this private
// static supplier — JUnit rejects private test methods, and a supplier is not a test.
// NOTE(review): the test method that consumed this supplier appears to be missing from this
// file — confirm against version-control history.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// read succeeds (200) when all access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails when an access condition does not match.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting MD5 on a ranged read returns the Base64 MD5 of exactly those bytes (0-3).
// NOTE(review): the method-level `throws NoSuchAlgorithmException` is vestigial — the digest
// call is inside the lambda, which already wraps that exception.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// With a policy that injects 5 failures, the default retry behavior still delivers the full
// payload; collectBytesInByteBufferStream emits the body as a single byte[].
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
// readToFile without overwrite must fail with FileAlreadyExistsException when the destination
// file already exists on disk.
@Test
public void downloadFileExists() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
// FIX: append/flush return cold Monos and never executed without a subscription — block()
// so the remote file actually has content, matching the sibling download tests.
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath()))
.verifyErrorSatisfies(r -> {
UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
});
}
// readToFile with overwrite=true replaces an existing destination file with the payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile creates the destination when it does not exist and writes the payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile with explicit CREATE/READ/WRITE open options writes the payload to disk.
// NOTE(review): despite the name, this test creates the destination file beforehand
// (createNewFile) — confirm whether the pre-creation or the name is wrong.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile with TRUNCATE_EXISTING overwrites an existing destination file.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes exercised by the download tests: tiny, block-aligned, unaligned, and large.
private static Stream<Integer> downloadFileSupplier() {
    Integer[] sizes = {20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB};
    return Arrays.stream(sizes);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged readToFile: the downloaded slice must match the same offset/count of the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges covering: full payload, offset slice, small interior slice, truncated tail,
// and a count larger than the payload (10 KiB).
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
@Test
// A range starting past the end of the file must fail with a service error.
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A FileRange with offset 0 and no count should download the whole file.
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Download succeeds when all access conditions (modified-since, etag match, lease) are met.
// setupPath*Condition translates the sentinel values into real etags/lease ids.
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Download must fail with ConditionNotMet / lease mismatch when access conditions are violated.
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
// Either error code is acceptable depending on which precondition fails first.
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// NOTE(review): this @EnabledIf condition string appears truncated in this view — verify the
// full method reference against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Verifies etag locking during a chunked download: a pipeline policy overwrites the blob
// after the first response, so subsequent chunk requests must fail with 412 and the partial
// output file must be removed.
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the very first response, race in an overwrite so the etag captured by the
// download no longer matches on later chunk requests.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple chunk requests.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
// Reactor may deliver a composite; any wrapped 412 satisfies the expectation.
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give the async cleanup a moment before asserting the partial file was deleted.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
// NOTE(review): this @EnabledIf condition string appears truncated in this view — verify the
// full method reference against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Verifies the deprecated ProgressReceiver reports complete, bounded, and monotonically
// increasing progress during a download.
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
// Progress must reach exactly fileSize and never exceed it.
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
@SuppressWarnings("deprecation")
// Records every cumulative progress value reported; the list is inspected directly by tests.
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// NOTE(review): this @EnabledIf condition string appears truncated in this view — verify the
// full method reference against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Same progress contract as downloadFileProgressReceiver, but using the non-deprecated
// ProgressListener API.
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
// Progress must reach exactly fileSize and never exceed it.
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every cumulative progress value reported; the list is inspected directly by tests.
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// A minimal rename within the same file system returns 201 Created.
@Test
public void renameMin() {
    String destination = generatePathName();
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, destination, null, null, null), 201);
}
// Renames the file, verifies the destination is readable (200), and that the source is gone.
@Test
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .assertNext(r ->
            // Fixed assertEquals argument order: JUnit expects (expected, actual) so failure
            // messages read correctly.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete())
        .verifyComplete();
    // The original path must no longer resolve.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
// Renames the file into a different file system and verifies the destination is readable
// while the source no longer exists.
@Test
public void renameFilesystemWithResponse() {
    // blockOptional + orElseThrow instead of a raw block(): a null emission would otherwise
    // surface as an opaque NPE on getFileSystemName() rather than a clear test failure.
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient
        .createFileSystem(generateFileSystemName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected file system to be created."));
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
            null, null, null))
        .assertNext(r ->
            // Fixed assertEquals argument order: (expected, actual).
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete())
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
// Renaming a path that was never created must fail with a service error.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String destination = generatePathName();
    StepVerifier.create(fc.renameWithResponse(null, destination, null, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
// Renames between URL-encoded and plain path names and verifies the destination is readable.
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
            null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // BUG FIX: the original chained a flatMap that returned null and was never
            // subscribed, so the properties assertion never actually executed (and would
            // have thrown NPE if it had). Drive the call with StepVerifier instead.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename succeeds when all access conditions on the SOURCE path are satisfied.
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename must fail when access conditions on the SOURCE path are violated.
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename succeeds when all access conditions on the DESTINATION path are satisfied.
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename must fail when access conditions on the DESTINATION path are violated.
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies that a client authenticated with a file-system SAS (with move permission)
// can rename the file, and the destination is then readable.
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // blockOptional + orElseThrow instead of a raw block(): a null emission would otherwise
    // surface as an opaque NPE instead of a clear test failure.
    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected rename to emit the destination client."));
    // Fixed assertEquals argument order: (expected, actual).
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?' that the client
// must tolerate.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // blockOptional + orElseThrow instead of a raw block(): avoids an opaque NPE if the
    // rename never emits.
    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected rename to emit the destination client."));
    // Fixed assertEquals argument order: (expected, actual).
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Smoke test: appending the default payload at offset 0 succeeds.
@Test
public void appendDataMin() {
    BinaryData payload = DATA.getDefaultBinaryData();
    assertDoesNotThrow(() -> fc.append(payload, 0).block());
}
@Test
// Append returns 202 Accepted with the standard request/version/date headers and
// confirms server-side encryption.
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a client-computed MD5 of the payload; the service validates it and
// still returns 202 with the standard headers.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    // Use an explicit charset: the no-arg String.getBytes() uses the platform default
    // encoding, which could yield a different digest on non-UTF-8 machines.
    byte[] md5 = MessageDigest.getInstance("MD5")
        .digest(DATA.getDefaultText().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
// Invalid stream/length combinations must fail client-side with the given exception type.
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null stream, declared length too long, declared length too short.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    BinaryData emptyPayload = BinaryData.fromBytes(new byte[0]);
    StepVerifier.create(fc.append(emptyPayload, 0))
        .verifyError(DataLakeStorageException.class);
}
@Test
// A null body must fail client-side with NPE before any request is sent.
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
// Append succeeds (202) when the correct active lease id is supplied.
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
@Test
// Append with a wrong lease id must fail with 412 Precondition Failed.
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// True when the targeted service version predates 2020-08-04; used to gate
// lease-action tests via @DisabledIf.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE during append should leave the file locked with a fixed-duration lease.
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.AUTO_RENEW during append should keep an existing lease active.
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// CONSISTENCY FIX: every other lease-action test (ACQUIRE, AUTO_RENEW, ACQUIRE_RELEASE) is
// gated on the 2020-08-04 service version, which introduced lease actions on append; this
// one was missing the gate and would fail against older service versions.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.RELEASE (with flush) during append should leave the file unlocked.
public void appendDataLeaseRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE_RELEASE (with flush) acquires for the operation and releases after,
// leaving the file unlocked.
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
// Appending to a path that was never created must fail with 404.
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
// With a policy that injects transient failures, the append must be retried transparently
// and the flushed content must still round-trip intact.
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Append with setFlush(true) commits the data in one call; a subsequent read must return it.
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
// NOTE(review): this body is identical to appendDataMin — presumably intended to exercise a
// BinaryData-specific append overload; confirm there isn't a distinct overload to call here.
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// Append of a BinaryData payload returns 202 with the standard headers and
// server-side-encryption confirmation.
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// BinaryData append with setFlush(true) returns 202 with the standard headers.
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Smoke test: flushing previously appended data succeeds.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    long committedLength = DATA.getDefaultDataSizeLong();
    assertDoesNotThrow(() -> fc.flush(committedLength, true).block());
}
@Test
// Flush with close=true (and no data retention) completes without error.
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
// Flush with retainUncommittedData=true completes without error.
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
// Flushing with a position (4) that does not match the appended length must fail.
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
// HTTP headers supplied at flush time must be reflected on the path's properties;
// a null content type defaults to application/octet-stream.
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
// Effectively-final copy for use inside the lambda below.
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Flush succeeds (200) when all access conditions are satisfied.
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Flush must fail when access conditions are violated.
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // CONSISTENCY: use getDefaultDataSizeLong() like every sibling flush test (the original
    // passed the int getDefaultDataSize(), which only worked via implicit widening).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
            null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created must fail with a service error.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1L, true))
        .verifyError(DataLakeStorageException.class);
}
@Test
// A second flush with overwrite=false over already-committed data must be rejected.
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
// Percent-encoded path names must be decoded by the client; getFilePath returns the
// decoded form.
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
@Test
// Bearer-token credentials require HTTPS; building against an http endpoint must throw.
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
// NOTE(review): this @EnabledIf condition string appears truncated in this view — verify the
// full method reference against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
// Round-trips a random file of the given size through uploadFromFile/readToFile and
// compares the bytes.
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Sizes/block-size combinations: small and medium with default block size, and a large
// upload forced through 4 MiB blocks.
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
// Uploads a local file with metadata attached and verifies both the metadata and the
// content round-trip.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Use the typed unchecked wrapper for IOException instead of a bare
                // RuntimeException; the cause is preserved either way.
                throw new java.io.UncheckedIOException(e);
            }
        })
        .verifyComplete();
}
// By default uploadFromFile must NOT overwrite an existing file, on both a pre-created
// client (fc) and a freshly created one (fac).
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // FIX: the original created a second random file inline and never registered it for
    // cleanup, leaking a temp file; track it like every other fixture.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// With overwrite=true, uploadFromFile must succeed over existing content on both clients.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // FIX: the original created a second random file inline and never registered it for
    // cleanup, leaking a temp file; track it like every other fixture.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
 * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
 * read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Latest cumulative byte count reported; each callback overwrites the previous value.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
// Returns the most recently reported cumulative byte count.
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Non-deprecated counterpart of FileUploadReporter: keeps only the latest cumulative
// byte count reported by the upload.
private static final class FileUploadListener implements ProgressListener {
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
// Returns the most recently reported cumulative byte count.
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
// NOTE(review): this @EnabledIf condition string appears truncated in this view — verify the
// full method reference against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
// The deprecated ProgressReceiver must end up reporting exactly the uploaded size.
// MaxSingleUploadSize is set just below the block size to force the chunked upload path.
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Parameter rows: (total upload size, block size, max concurrency).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    ).stream();
}
// Verifies the ProgressListener sees the full byte count when uploading from a file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // blockSize - 1 forces the chunked (non single-shot) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    // The listener stores the last cumulative count, which must equal the total upload size.
    assertEquals(size, uploadListener.getReportedByteCount());
}
// Verifies uploadFromFile honors single-shot vs. chunked thresholds and writes the full file size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    // Read-back of properties confirms the stored size matches what was uploaded.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Parameter rows: (data size, max single-upload size, block size; null = service default).
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Arrays.asList(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    ).stream();
}
// Verifies uploadFromFileWithResponse returns a populated response and stores the full file size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // BUG FIX: the original StepVerifier was built but never subscribed (missing verifyComplete()),
    // so the file-size assertion never actually ran.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Verifies uploading an empty buffer without overwrite fails with a storage exception.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
        .verifyError(DataLakeStorageException.class);
}
// Verifies empty buffers interleaved in the upload stream are skipped and the rest concatenates correctly.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
    byte[] expectedDownload) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
        null, true))
        .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
        .verifyComplete();
    // Read-back must yield exactly the concatenation of the non-empty buffers.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
        .verifyComplete();
}
// Parameter rows: three buffers to upload plus the bytes expected on read-back.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    return Arrays.asList(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer,
            ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
    ).stream();
}
// Verifies chunked buffered upload of large payloads round-trips intact for various block sizes/concurrency.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    // Skip read-back verification for the very large (>= 100 MB) payloads.
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
// Parameter rows: (data size, block size, number of concurrent buffers).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Arrays.asList(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    ).stream();
}
// Asserts that `result` is exactly the concatenation of `buffers`, comparing one slice at a time
// by walking result's position/limit window. Mutates the positions/limits of all buffers involved.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer buffer : buffers) {
        buffer.position(0);
        // Restrict result's window to the span that should match this buffer.
        result.limit(result.position() + buffer.remaining());
        TestUtils.assertByteBuffersEqual(buffer, result);
        // Advance past the slice just compared.
        // NOTE(review): relies on buffer.remaining() still reflecting the slice length after the
        // assert call — confirm TestUtils.assertByteBuffersEqual does not consume the buffers.
        result.position(result.position() + buffer.remaining());
    }
    // Nothing may be left over in result after consuming every expected slice.
    assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    // Expected granularity of progress callbacks.
    private final long blockSize;
    // Number of progress callbacks observed.
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
private static final class Listener implements ProgressListener {
    // Expected granularity of progress callbacks.
    private final long blockSize;
    // Number of progress callbacks observed.
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // Every report must land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// Verifies the deprecated ProgressReceiver is invoked at least once per uploaded block.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // Retries may add extra reports, so only a lower bound is asserted.
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Parameter rows: (total size, block size, max concurrency).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    ).stream();
}
// Verifies the ProgressListener is invoked at least once per uploaded block.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            // Retries may add extra reports, so only a lower bound is asserted.
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Verifies buffered upload reassembles a source Flux whose buffers do not align with the block size.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    // Sizes are in MB here, unlike the raw byte sizes used elsewhere.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    // Upload all chunks, then read everything back for comparison.
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Parameter rows: (chunk sizes in MB, block size in MB, number of buffers).
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    ).stream();
}
// Verifies the single-shot vs. chunked decision path for various chunk-size mixes (cold Flux source).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot (publish().autoConnect()) source,
// which cannot be re-subscribed on retry.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size mixes straddling the 4 MB single-shot threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB)
    ).stream();
}
// Verifies a hot source still uploads correctly when the pipeline injects transient failures
// (data must be buffered internally so retries do not lose chunks).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // A clean client (no failure policy) is used for the read-back.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size mixes for the transient-failure hot-flux scenario.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB)
    ).stream();
}
// Verifies an InputStream-based parallel upload survives injected transient failures intact.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    // Read back through the clean client and compare bytes.
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
// Verifies passing a null data Flux to upload surfaces a NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// Verifies HTTP headers (cache-control, disposition, encoding, language, MD5, content-type)
// set at upload time are reflected in the path properties afterwards.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    // MD5 is only attached when the test row asks for validation.
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        // A null content type falls back to the service default "application/octet-stream".
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// Parameter rows: (size, cache-control, disposition, encoding, language, validate MD5, content type).
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    ).stream();
}
// Verifies metadata supplied at upload time round-trips through getProperties.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Build the metadata map, skipping null keys so the empty-metadata row is covered too.
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// Verifies the number of append calls matches the expected chunking for given size thresholds,
// by spying on appendWithResponse with a counting subclass.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Anonymous subclass counts every append before delegating to the real implementation.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
// Verifies upload accepts POSIX permissions/umask options and the file lands with the right size.
@Test
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
// Verifies upload succeeds when all access conditions (lease, ETag, modified-since) are satisfied.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // setup* helpers resolve sentinel values into real lease IDs / ETags for this file.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// Verifies upload fails with 412 (precondition failed) when an access condition is violated.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // Here ifNoneMatch is resolved to the REAL ETag, guaranteeing the condition fails.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
// Verifies a chunked upload with a bogus lease ID fails cleanly (buffer pool must not deadlock).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // GARBAGE_LEASE_ID never matches the real lease, so the service must reject the upload.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
// Verifies a second buffered upload without an overwrite flag fails once the path exists.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
// Verifies uploadFromFile with overwrite=true succeeds against an existing path.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // BUG FIX: the original lambda returned a cold Mono without subscribing (no block()),
    // so uploadFromFile never executed and assertDoesNotThrow was vacuous
    // (compare the blocking form used in uploadFromFileOverwrite).
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // NOTE(review): this getRandomFile(50) result is not registered in createdFiles — confirm cleanup.
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
        .verifyComplete();
}
// Verifies upload from a non-markable (file-channel-backed) stream round-trips to disk intact.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    // Downloaded file must be byte-identical to the source file.
    compareFiles(file, outFile, 0, file.length());
}
// Verifies upload works when the InputStream length is not supplied up front.
@Test
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies upload rejects lengths that disagree with the actual stream content.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Lengths that are non-positive or disagree with the actual default-data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    return Arrays.asList(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1).stream();
}
// Verifies upload succeeds despite injected transient HTTP failures (retry path).
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    // Read back through the clean client to confirm the data landed intact.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies upload from a BinaryData source round-trips intact.
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies BinaryData upload with overwrite=true succeeds against the existing fc path.
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies the encryption context set on upload is returned by getProperties (service >= 2021-04-10).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
// Uploads a fixed two-row CSV (optionally headed), repeated numCopies times, to fc,
// using the serialization's column/record separators.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    byte[] headers = header.getBytes();
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    // Header bytes are emitted once at the front, only when the serialization declares headers.
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Tile the two-row CSV body numCopies times after the optional header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Uploads a small JSON object with numCopies "nameN": "ownerN" entries to fc.
private void uploadSmallJson(int numCopies) {
    StringBuilder b = new StringBuilder();
    b.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    b.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(b.toString()), 0).block();
    // NOTE(review): b.length() is a char count; it equals the byte count here only because
    // the generated content is ASCII — confirm if non-ASCII content is ever introduced.
    fc.flush(b.length(), true).block();
}
// Verifies a SELECT * query over headerless CSV returns the file content unchanged.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    32
})
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate query output buffers into one byte stream for comparison.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Verifies query with varying record/column separators and header handling; when the input has
// headers but the output does not, the 16-byte header line is stripped from the expectation.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Parameter rows: (record separator, column separator, headers in input, headers in output).
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    return Arrays.asList(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    ).stream();
}
// Verifies query round-trips when escape char and field quote are configured on both sides.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
    // CSV in, JSON out: a single CSV record is rewritten as a JSON object keyed by
    // positional column names (_1 .. _4).
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryInputCsvOutputJson() {
        liveTestScenarioWithRetry(() -> {
            FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
                .setRecordSeparator('\n')
                .setColumnSeparator(',')
                .setEscapeChar('\0')
                .setFieldQuote('\0')
                .setHeadersPresent(false);
            uploadCsv(inSer, 1);
            FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
            String expression = "SELECT * from BlobStorage";
            byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
            ByteArrayOutputStream queryData = new ByteArrayOutputStream();
            FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
                .setOutputSerialization(outSer);
            byte[] queryArray = fc.queryWithResponse(optionsOs)
                .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
            // Only the prefix is compared; the output also carries the record separator.
            TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
        });
    }

    // JSON in, CSV out: two small JSON documents collapse to a single CSV line.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryInputJsonOutputCsv() {
        liveTestScenarioWithRetry(() -> {
            FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
            uploadSmallJson(2);
            FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
                .setRecordSeparator('\n')
                .setColumnSeparator(',')
                .setEscapeChar('\0')
                .setFieldQuote('\0')
                .setHeadersPresent(false);
            String expression = "SELECT * from BlobStorage";
            byte[] expectedData = "owner0,owner1\n".getBytes();
            ByteArrayOutputStream queryData = new ByteArrayOutputStream();
            FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
                .setOutputSerialization(outSer);
            byte[] queryArray = fc.queryWithResponse(optionsOs)
                .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
            TestUtils.assertArraysEqual(expectedData, queryArray);
        });
    }

    // CSV in, Arrow out: only checks the request succeeds with an Arrow schema set;
    // the Arrow payload itself is not validated here.
    @SuppressWarnings("resource")
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryInputCsvOutputArrow() {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 32);
        List<FileQueryArrowField> schema = Collections.singletonList(
            new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
        FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
        String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
        liveTestScenarioWithRetry(() -> {
            OutputStream queryData = new ByteArrayOutputStream();
            FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
            assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
        });
    }
    // Non-fatal query errors are delivered to the error consumer rather than thrown;
    // the query itself completes.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryNonFatalError() {
        FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        // Uploaded with '.' column separators but queried with ',', so per-record
        // column lookups fail with InvalidColumnOrdinal.
        uploadCsv(base.setColumnSeparator('.'), 32);
        String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
        liveTestScenarioWithRetry(() -> {
            MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
            assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(base.setColumnSeparator(','))
                .setOutputSerialization(base.setColumnSeparator(','))
                .setErrorConsumer(receiver2)).block().getValue().blockLast());
            assertTrue(receiver2.numErrors > 0);
        });
    }

    // Declaring JSON input for CSV data is fatal: the response is returned, but
    // draining the body flux must fail.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryFatalError() {
        FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(true);
        uploadCsv(base.setColumnSeparator('.'), 32);
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
                new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
                .assertNext(r -> {
                    assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
                })
                .verifyComplete();
        });
    }
    // The progress consumer must eventually report the full file size as bytes scanned.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryProgressReceiver() {
        FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(base.setColumnSeparator('.'), 32);
        long sizeofBlobToRead = fc.getProperties().block().getFileSize();
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
            FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
            fc.queryWithResponse(options).block().getValue().blockLast();
            assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
        });
    }
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
/*StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)))
.expectError(IllegalArgumentException.class)
.verify();*/
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
    // Arrow is output-only: supplying it as input serialization is rejected.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryArrowInputIA() {
        FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            StepVerifier.create(fc.queryWithResponse(
                new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
                .verifyError(IllegalArgumentException.class);
        });
    }

    // True when the targeted service version predates 2020-10-02.
    private static boolean olderThan20201002ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_10_02);
    }

    // Parquet is input-only: supplying it as output serialization is rejected.
    @DisabledIf("olderThan20201002ServiceVersion")
    @Test
    public void queryParquetOutputIA() {
        FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            StepVerifier.create(fc.queryWithResponse(
                new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
                .verifyError(IllegalArgumentException.class);
        });
    }
    // Querying a path that was never created fails with a storage exception.
    @SuppressWarnings("resource")
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryError() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        liveTestScenarioWithRetry(() -> {
            StepVerifier.create(fc.query("SELECT * from BlobStorage"))
                .verifyError(DataLakeStorageException.class);
        });
    }

    // Query succeeds when every supplied access condition matches the file's state.
    @DisabledIf("olderThan20191212ServiceVersion")
    @ParameterizedTest
    @MethodSource("modifiedMatchAndLeaseIdSupplier")
    public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        DataLakeRequestConditions bac = new DataLakeRequestConditions()
            .setLeaseId(setupPathLeaseCondition(fc, leaseID))
            .setIfMatch(setupPathMatchCondition(fc, match))
            .setIfNoneMatch(noneMatch)
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setRequestConditions(bac)).block());
        });
    }
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
int retry = 0;
while (retry < 5) {
try {
runnable.run();
break;
} catch (Exception ex) {
retry++;
sleepIfRunningAgainstService(5000);
}
}
}
    // Query fails with a storage exception when any supplied access condition does not
    // match the file's state.
    @DisabledIf("olderThan20191212ServiceVersion")
    @ParameterizedTest
    @MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
    public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        setupPathLeaseCondition(fc, leaseID);
        DataLakeRequestConditions bac = new DataLakeRequestConditions()
            .setLeaseId(leaseID)
            .setIfMatch(match)
            .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        String expression = "SELECT * from BlobStorage";
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
            .verifyError(DataLakeStorageException.class);
    }
    // Scheduling deletion with the given options toggles the path's expiry metadata.
    @DisabledIf("olderThan20191212ServiceVersion")
    @ParameterizedTest
    @MethodSource("scheduleDeletionSupplier")
    public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
        DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fileAsyncClient.create().block();
        fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
        assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
    }

    // Deletion options paired with whether an expiry should be set afterwards.
    private static Stream<Arguments> scheduleDeletionSupplier() {
        return Stream.of(
            Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
            Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
            Arguments.of(new FileScheduleDeletionOptions(), false),
            Arguments.of(null, false)
        );
    }

    // True when the targeted service version predates 2019-12-12.
    private static boolean olderThan20191212ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2019_12_12);
    }

    // An absolute expiry time is honored, compared at second precision.
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void scheduleDeletionTime() {
        OffsetDateTime now = testResourceNamer.now();
        FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
        DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fileAsyncClient.create().block();
        fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
        assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
    }

    // Scheduling deletion on a file that does not exist fails.
    @Test
    public void scheduleDeletionError() {
        FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
        DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
            .verifyError(DataLakeStorageException.class);
    }
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
    // Serialization implementation unknown to the client; used to verify that
    // unsupported implementations are rejected (see queryInputOutputIA).
    private static final class RandomOtherSerialization implements FileQuerySerialization {
    }

    // upload without the overwrite flag must refuse to replace the existing file.
    @Test
    public void uploadInputStreamOverwriteFails() {
        StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
            .verifyError(IllegalArgumentException.class);
    }
    // upload with overwrite=true replaces the file; read back and compare bytes.
    @Test
    public void uploadInputStreamOverwrite() {
        fc.upload(DATA.getDefaultBinaryData(), null, true).block();
        byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
        TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
    }
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
    // A successful parallel upload returns path info carrying a non-null ETag.
    @SuppressWarnings("deprecation")
    @Test
    public void uploadReturnValue() {
        assertNotNull(fc.uploadWithResponse(
            new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
            .getValue().getETag());
    }

    // A per-call policy pinning x-ms-version must be reflected on every response.
    @Test
    public void perCallPolicy() {
        DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
            .addPolicy(getPerCallVersionPolicy())
            .buildFileAsyncClient();
        assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
            .getValue(X_MS_VERSION));
        assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
            .getValue(X_MS_VERSION));
    }
} | class FileAsyncApiTests extends DataLakeTestBase {
    // File client under test, recreated for each test in setup().
    private DataLakeFileAsyncClient fc;
    // Local files created by tests; best-effort deleted in cleanup().
    private final List<File> createdFiles = new ArrayList<>();
    // Shared permission set: owner rwx, group r-x, other r--.
    private static final PathPermissions PERMISSIONS = new PathPermissions()
        .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
        .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
        .setOther(new RolePermissions().setReadPermission(true));
    private static final String GROUP = null;
    private static final String OWNER = null;
    // Shared ACL fixture parsed once.
    private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
        PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    // Fresh file for every test.
    @BeforeEach
    public void setup() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    }

    // Best-effort removal of any local files a test produced.
    @SuppressWarnings("ResultOfMethodCallIgnored")
    @AfterEach
    public void cleanup() {
        createdFiles.forEach(File::delete);
    }

    // create() on a new path returns path info.
    @Test
    public void createMin() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.create())
            .assertNext(r -> assertNotEquals(null, r))
            .verifyComplete();
    }

    // Creation with all-default arguments returns 201 and standard headers.
    @Test
    public void createDefaults() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.createWithResponse(
            null, null, null, null, null))
            .assertNext(r -> {
                assertEquals(201, r.getStatusCode());
                validateBasicHeaders(r.getHeaders());
            })
            .verifyComplete();
    }

    // A garbage If-Match condition fails creation with a storage exception.
    @Test
    public void createError() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.createWithResponse(
            null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
            .verifyError(DataLakeStorageException.class);
    }

    // create(false) on an existing path refuses to overwrite.
    @Test
    public void createOverwrite() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.create(false))
            .verifyError(DataLakeStorageException.class);
    }

    // exists() is true for a created file.
    @Test
    public void exists() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.exists())
            .expectNext(true)
            .verifyComplete();
    }

    // exists() is false for a path that was never created.
    @Test
    public void doesNotExist() {
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.exists())
            .expectNext(false)
            .verifyComplete();
    }
    // HTTP headers set at creation are returned by getProperties; a null content type
    // defaults to application/octet-stream.
    @ParameterizedTest
    @CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
    public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
        String contentLanguage, String contentType) {
        PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
            .setContentDisposition(contentDisposition)
            .setContentEncoding(contentEncoding)
            .setContentLanguage(contentLanguage)
            .setContentType(contentType);
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        contentType = (contentType == null) ? "application/octet-stream" : contentType;
        fc.createWithResponse(null, null, headers, null, null).block();
        String finalContentType = contentType;
        StepVerifier.create(fc.getPropertiesWithResponse(null))
            .assertNext(r -> {
                validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                    null, finalContentType);
            })
            .verifyComplete();
    }

    // Metadata supplied at creation round-trips through getProperties.
    @ParameterizedTest
    @CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
    public void createMetadata(String key1, String value1, String key2, String value2) {
        Map<String, String> metadata = new HashMap<>();
        if (key1 != null) {
            metadata.put(key1, value1);
        }
        if (key2 != null) {
            metadata.put(key2, value2);
        }
        fc.createWithResponse(null, null, null, metadata, null).block();
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> assertEquals(metadata, r.getMetadata()))
            .verifyComplete();
    }

    // True when the targeted service version predates 2021-04-10.
    private static boolean olderThan20210410ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2021_04_10);
    }

    // The encryption context set at creation is visible via properties, read headers,
    // and path listings.
    @DisabledIf("olderThan20210410ServiceVersion")
    @Test
    public void createEncryptionContext() {
        dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
        dataLakeFileSystemAsyncClient.create().block();
        dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        String encryptionContext = "encryptionContext";
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
        fc.createWithResponse(options, Context.NONE).block();
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
            .verifyComplete();
        StepVerifier.create(fc.readWithResponse(null, null, null, false))
            .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
            .verifyComplete();
        // First listed path is the directory created above; the second is the file.
        StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
            .expectNextCount(1)
            .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
            .verifyComplete();
    }
    // Creation succeeds (201) when every supplied access condition matches.
    @ParameterizedTest
    @MethodSource("modifiedMatchAndLeaseIdSupplier")
    public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        DataLakeRequestConditions drc = new DataLakeRequestConditions()
            .setLeaseId(setupPathLeaseCondition(fc, leaseID))
            .setIfMatch(setupPathMatchCondition(fc, match))
            .setIfNoneMatch(noneMatch)
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
    }

    // Access-condition combinations expected to pass.
    private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
        return Stream.of(
            Arguments.of(null, null, null, null, null),
            Arguments.of(OLD_DATE, null, null, null, null),
            Arguments.of(null, NEW_DATE, null, null, null),
            Arguments.of(null, null, RECEIVED_ETAG, null, null),
            Arguments.of(null, null, null, GARBAGE_ETAG, null),
            Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
        );
    }

    // Creation fails when any supplied access condition does not match.
    @ParameterizedTest
    @MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
    public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
        String leaseID) {
        setupPathLeaseCondition(fc, leaseID);
        DataLakeRequestConditions drc = new DataLakeRequestConditions()
            .setLeaseId(leaseID)
            .setIfMatch(match)
            .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
            .setIfModifiedSince(modified)
            .setIfUnmodifiedSince(unmodified);
        StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
            .verifyError(DataLakeStorageException.class);
    }

    // Access-condition combinations expected to fail.
    private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
        return Stream.of(
            Arguments.of(NEW_DATE, null, null, null, null),
            Arguments.of(null, OLD_DATE, null, null, null),
            Arguments.of(null, null, GARBAGE_ETAG, null, null),
            Arguments.of(null, null, null, RECEIVED_ETAG, null),
            Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
        );
    }

    // Creation accepts POSIX-style permissions and umask strings.
    @Test
    public void createPermissionsAndUmask() {
        assertAsyncResponseStatusCode(fc.createWithResponse(
            "0777", "0057", null, null, null), 201);
    }
    // True when the targeted service version predates 2020-12-06.
    private static boolean olderThan20201206ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_12_06);
    }

    // An ACL supplied at creation is returned by getAccessControl (first two entries checked).
    @DisabledIf("olderThan20201206ServiceVersion")
    @Test
    public void createOptionsWithACL() {
        List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
        fc.createWithResponse(options, null).block();
        StepVerifier.create(fc.getAccessControl())
            .assertNext(r -> {
                assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
                assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
            })
            .verifyComplete();
    }

    // Owner and group supplied at creation are returned by getAccessControl.
    @DisabledIf("olderThan20201206ServiceVersion")
    @Test
    public void createOptionsWithOwnerAndGroup() {
        String ownerName = testResourceNamer.randomUuid();
        String groupName = testResourceNamer.randomUuid();
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
        fc.createWithResponse(options, null).block();
        StepVerifier.create(fc.getAccessControl())
            .assertNext(r -> {
                assertEquals(ownerName, r.getOwner());
                assertEquals(groupName, r.getGroup());
            })
            .verifyComplete();
    }
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
    // Creation accepts a full set of HTTP headers via DataLakePathCreateOptions.
    @ParameterizedTest
    @CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
        nullValues = "null")
    public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
        String contentLanguage, byte[] contentMD5, String contentType) {
        PathHttpHeaders putHeaders = new PathHttpHeaders()
            .setCacheControl(cacheControl)
            .setContentDisposition(contentDisposition)
            .setContentEncoding(contentEncoding)
            .setContentLanguage(contentLanguage)
            .setContentMd5(contentMD5)
            .setContentType(contentType);
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    }

    // Metadata supplied through the options object round-trips via getProperties.
    @ParameterizedTest
    @CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
    public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
        Map<String, String> metadata = new HashMap<>();
        if (key1 != null && value1 != null) {
            metadata.put(key1, value1);
        }
        if (key2 != null && value2 != null) {
            metadata.put(key2, value2);
        }
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> {
                for (String k : metadata.keySet()) {
                    assertTrue(r.getMetadata().containsKey(k));
                    assertEquals(metadata.get(k), r.getMetadata().get(k));
                }
            })
            .verifyComplete();
    }

    // Permissions 0777 with umask 0057 must yield effective permissions rwx-w----.
    @Test
    public void createOptionsWithPermissionsAndUmask() {
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
        fc.createWithResponse(options, null).block();
        StepVerifier.create(fc.getAccessControlWithResponse(
            true, null, null))
            .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
                r.getValue().getPermissions().toString()))
            .verifyComplete();
    }

    // A proposed lease id with a duration is accepted at creation.
    @DisabledIf("olderThan20201206ServiceVersion")
    @Test
    public void createOptionsWithLeaseId() {
        String leaseId = CoreUtils.randomUuid().toString();
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    }

    // A proposed lease id without a duration is rejected.
    @Test
    public void createOptionsWithLeaseIdError() {
        String leaseId = CoreUtils.randomUuid().toString();
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
        StepVerifier.create(fc.createWithResponse(options, null))
            .verifyError(DataLakeStorageException.class);
    }

    // A fixed-duration lease acquired at creation is reflected in the properties.
    @DisabledIf("olderThan20201206ServiceVersion")
    @Test
    public void createOptionsWithLeaseDuration() {
        String leaseId = CoreUtils.randomUuid().toString();
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> {
                assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
                assertEquals(LeaseStateType.LEASED, r.getLeaseState());
                assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
            })
            .verifyComplete();
    }

    // Absolute-time (or absent) deletion scheduling is accepted at creation.
    @DisabledIf("olderThan20201206ServiceVersion")
    @ParameterizedTest
    @MethodSource("timeExpiresOnOptionsSupplier")
    public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
        DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
    }

    // NOTE(review): uses OffsetDateTime.now() rather than testResourceNamer.now(); this
    // is a static context, but confirm it is safe for record/playback test modes.
    private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
        return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
    }

    // Relative expiry: expiresOn should be creation time plus the requested duration
    // (compareDatesWithPrecision presumably asserts internally — confirm).
    @DisabledIf("olderThan20201206ServiceVersion")
    @Test
    public void createOptionsWithTimeToExpireRelativeToNow() {
        DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
        DataLakePathCreateOptions options = new DataLakePathCreateOptions()
            .setScheduleDeletionOptions(deletionOptions);
        assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
        StepVerifier.create(fc.getProperties())
            .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
            .verifyComplete();
    }
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// First createIfNotExists returns 201; a second call on the same path returns 409 without overwriting.
@Test
public void createIfNotExistsOverwrite() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        201);
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
    // 409 Conflict: the path already exists, so createIfNotExists is a no-op.
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
        409);
}
// After createIfNotExists, exists() must report the file as present.
@Test
public void createIfNotExistsExists() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    Boolean present = fc.exists().block();
    assertTrue(present);
}
// HTTP headers supplied at createIfNotExists time are round-tripped through getProperties.
// NOTE(review): this supplier uses @CsvSource without nullValues, so "null" arrives as the
// literal string "null" rather than a Java null — TODO confirm that is intentional.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service defaults content type to application/octet-stream when none is set.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, null, finalContentType))
        .verifyComplete();
}
// Metadata supplied at createIfNotExists time is round-tripped exactly through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Creating a file with explicit POSIX permissions and umask succeeds with 201.
@Test
public void createIfNotExistsPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setUmask("0057")
        .setPermissions("0777");
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, Context.NONE), 201);
}
// The encryption context set at creation is surfaced by getProperties, read responses, and listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    // A directory is created alongside the file so the recursive listing below has two entries.
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // expectNextCount(1) skips the first listed path (presumably the directory) and asserts on the
    // second (the file) — NOTE(review): this relies on listPaths ordering; confirm it is stable.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// An access control list supplied at creation is readable back via getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
    fc.createIfNotExistsWithResponse(options, null).block();
    // Only the user and group entries are asserted here.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
            assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
        })
        .verifyComplete();
}
// Owner and group names supplied at creation are readable back via getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String ownerName = testResourceNamer.randomUuid();
    String groupName = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals(ownerName, r.getOwner());
            assertEquals(groupName, r.getGroup());
        })
        .verifyComplete();
}
// With owner/group left null the service assigns its default principal, "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// createIfNotExists accepts a full set of PathHttpHeaders (including null values) and returns 201.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
    nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata set via DataLakePathCreateOptions is present (as a superset check) after creation.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // Containment check rather than equality: the service may add its own metadata entries.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            for (String k : metadata.keySet()) {
                assertTrue(r.getMetadata().containsKey(k));
                assertEquals(metadata.get(k), r.getMetadata().get(k));
            }
        })
        .verifyComplete();
}
// Permissions 0777 masked with umask 0057 should yield effective permissions rwx-w----.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
    fc.createIfNotExistsWithResponse(options, null).block();
    StepVerifier.create(fc.getAccessControlWithResponse(
        true, null, null))
        .assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            r.getValue().getPermissions().toString()))
        .verifyComplete();
}
// Proposing a lease id together with a 15-second lease duration is accepted at creation (201).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String proposedId = testResourceNamer.randomUuid();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setLeaseDuration(15)
        .setProposedLeaseId(proposedId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id without an accompanying lease duration is rejected by the service.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setProposedLeaseId(CoreUtils.randomUuid().toString());
    StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
// A fixed 15-second lease acquired at creation shows as LOCKED/LEASED/FIXED in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// createIfNotExists with an absolute expiry time (or null deletion options) succeeds with 201.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// createIfNotExists with a relative expiry sets expiresOn to creationTime + duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
    // compareDatesWithPrecision tolerates small clock differences between client and service.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
        .verifyComplete();
}
// Deleting an existing file with no access conditions returns 200.
@Test
public void deleteMin() {
    assertAsyncResponseStatusCode(fc.deleteWithResponse(
        null, null, null), 200);
}
// After a delete, fetching properties fails with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
    fc.deleteWithResponse(null, null, null).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when every supplied access condition (lease, ETag, dates) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // setupPath*Condition resolves sentinel values (e.g. received lease/ETag) to real ones.
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.deleteWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file resolves to true.
@Test
public void deleteIfExists() {
    StepVerifier.create(fc.deleteIfExists()).expectNext(true).verifyComplete();
}
// deleteIfExists on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After a successful deleteIfExists (200), the file's properties can no longer be fetched.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    StepVerifier.create(fc.getPropertiesWithResponse(null)).verifyError(DataLakeStorageException.class);
}
// First deleteIfExists succeeds (200); a second attempt reports 404 instead of throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
    StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
        .verifyError(DataLakeStorageException.class);
}
// setPermissions returns the updated path info with a fresh ETag and last-modified time.
@Test
public void setPermissionsMin() {
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
// setPermissionsWithResponse on an existing file returns 200.
@Test
public void setPermissionsWithResponse() {
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}
// setPermissions succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}
// setPermissions on a nonexistent file surfaces a DataLakeStorageException.
@Test
public void setPermissionsError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns the updated path info with a fresh ETag and last-modified time.
@Test
public void setACLMin() {
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
// setAccessControlListWithResponse on an existing file returns 200.
@Test
public void setACLWithResponse() {
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
        200);
}
// setAccessControlList fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a nonexistent file surfaces a DataLakeStorageException.
@Test
public void setACLError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-02-10
// (the version that introduced the recursive ACL operations tested below).
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file changes exactly one file, zero directories, zero failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// Recursive ACL update on a single file changes exactly one file, zero directories, zero failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// Recursive ACL removal of default/named entries on a single file reports one changed file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
        "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
    StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// getAccessControl returns non-null ACL, permissions, owner, and group for an existing file.
@Test
public void getAccessControlMin() {
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertNotNull(r.getAccessControlList());
            assertNotNull(r.getPermissions());
            assertNotNull(r.getOwner());
            assertNotNull(r.getGroup());
        })
        .verifyComplete();
}
// getAccessControlWithResponse (userPrincipalNameReturned = false) returns 200.
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}
// getAccessControlWithResponse with UPN resolution enabled (first arg true) returns 200.
@Test
public void getAccessControlReturnUpn() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}
// getAccessControl succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, drc, null), 200);
}
// getAccessControl fails when an access condition is violated. The garbage-lease case is skipped
// — presumably this operation does not validate lease ids; TODO confirm against the service docs.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
        .verifyError(DataLakeStorageException.class);
}
// Exhaustive check of the default property set for a freshly created, un-leased, un-copied file:
// creation/modification metadata present, all copy/lease/optional fields absent.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            // Always-present basics.
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            // No HTTP headers were set at creation, so these are all absent.
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            // No lease has been acquired.
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            // The file has never been the destination of a copy.
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
// getPropertiesWithResponse on an existing file returns 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when any supplied access condition is violated.
// CONSISTENCY FIX: every other *ACFail test in this class calls setupPathLeaseCondition as a
// stand-alone statement and passes the raw (invalid) lease id into setLeaseId; this one
// previously inlined the setup call's return value, which could mask the invalid id.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// getProperties on a nonexistent file fails with a BlobNotFound DataLakeStorageException.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
// Passing null headers to setHttpHeaders is valid: 200 with standard response headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Changing only the content type (while echoing back the existing header values) takes effect.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        // MD5 of the default payload; setHttpHeaders replaces all headers, so it must be re-supplied.
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Explicitly-set HTTP headers (including a content MD5 over the uploaded payload) are all
// round-tripped through getProperties.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUGFIX: append/flush return cold Monos — without block() they were never subscribed, so the
    // payload was never uploaded and the MD5 parameter could not match any content. Sibling tests
    // (readAllNull, readMin) block both calls.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Supplies header tuples for setHTTPHeadersHeaders: an all-null row and a fully-populated row
// whose MD5 is computed over the default test payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] defaultPayloadMd5 =
        Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", defaultPayloadMd5, "type"));
}
// setHttpHeaders succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a nonexistent file surfaces a DataLakeStorageException.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null)).verifyError(DataLakeStorageException.class);
}
// setMetadata round-trips a single key/value pair through getProperties.
@Test
public void setMetadataMin() {
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(metadata, props.getMetadata()))
        .verifyComplete();
}
// setMetadata with zero or more pairs succeeds and the exact map is returned by getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// setMetadata succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Set up the lease as a side effect, but deliberately pass the raw (invalid) lease id below.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setMetadata on a nonexistent file surfaces a DataLakeStorageException.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null)).verifyError(DataLakeStorageException.class);
}
// Full read with all-null options: the response carries content/length headers but none of the
// optional (metadata, copy, range, encoding) headers, and the body matches the uploaded payload.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false)
        .flatMap(r -> {
            HttpHeaders headers = r.getHeaders();
            // No metadata was set, so no x-ms-meta-* headers should be present.
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            // Whole-file read: no Content-Range; no optional headers were ever set.
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            // The file was never a copy destination.
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            // No lease has been acquired.
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
            return FluxUtil.collectBytesInByteBufferStream(r.getValue());
        }))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reading a zero-length file yields a single empty buffer.
// NOTE(review): the path name "emptyFile" is hard-coded rather than using generatePathName() —
// confirm this does not collide across recorded test runs.
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
// When a ranged read is retried, the retry must request the original range; the mock policy
// injects failures and verifies the "bytes=2-6" Range header, surfacing an IOException if the
// retries are exhausted — which is what this test expects.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    // Downloaded bytes must round-trip the uploaded content exactly.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Argument supplier for ranged reads: (offset, count, expected substring).
// BUGFIX: @ParameterizedTest/@MethodSource were attached to this private static
// supplier itself, which is invalid — JUnit would try to execute the supplier as
// a test and fail. The annotations belong on the consuming test method (which
// appears to be missing from this chunk — TODO confirm against the original file).
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        // Null count: read from the offset to the end of the file.
        Arguments.of(0L, null, DATA.getDefaultText()),
        // Exact sub-range from the start.
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        // Sub-range from the middle.
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // All access conditions are set up to be satisfied for the current file:
    // setupPathLeaseCondition / setupPathMatchCondition resolve the sentinel
    // values supplied by the method source into the file's real lease id / ETag.
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);

    // With all conditions met, the read succeeds with 200 OK.
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Acquire any requested lease on the file, but pass the raw (wrong) sentinel
    // values in the conditions so that exactly one condition fails per case.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // Resolving noneMatch to the file's real ETag guarantees If-None-Match fails.
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);

    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void readMd5() {
    // NOTE: the dead `throws NoSuchAlgorithmException` was removed — the only
    // call that can throw it is inside the lambda and is caught there.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    // A ranged read with rangeGetContentMd5=true must return a Content-MD5
    // header covering exactly the requested range (bytes 0-2).
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            // Specify the charset explicitly; the header value is Base64 ASCII,
            // so UTF-8 is safe and avoids platform-default-charset surprises.
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5)
                .getBytes(StandardCharsets.UTF_8);
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5")
                            .digest(DATA.getDefaultText().substring(0, 3).getBytes(StandardCharsets.UTF_8))),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                // MD5 is a mandatory JDK algorithm; this is effectively unreachable.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    // A client whose pipeline injects transient failures must still complete the
    // download through the default retry behavior.
    // (Removed an unused ByteArrayOutputStream local that was never written to.)
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));

    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }

    // BUGFIX: append/flush previously returned un-subscribed Monos (no .block()),
    // so the file content was never actually uploaded — every sibling test blocks.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    // readToFile without overwrite must fail when the destination already exists.
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
@Test
public void downloadFileExistsSucceeds() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    // Ensure the destination exists so overwrite=true is actually exercised.
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }

    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    // With overwrite=true an existing destination file must be replaced.
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();

    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    // Ensure the destination does NOT exist so readToFile creates it.
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }

    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();

    // The downloaded file content must match what was uploaded.
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File sizes exercised by the parallel download tests: tiny, exactly 16 MB,
// an odd unaligned size (8 * 1026 * 1024 + 10 — preserved verbatim; possibly
// intended to be 1024, confirm upstream), and 50 MB.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges exercised against the default-size test blob: the full blob,
// all-but-first-byte, a small middle slice, all-but-last-byte, and a count
// extending well past the end of the data.
private static Stream<FileRange> downloadFileRangeSupplier() {
    long fullSize = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, fullSize),
        new FileRange(1, fullSize - 1),
        new FileRange(3, 2L),
        new FileRange(0, fullSize - 1),
        new FileRange(0, 10 * 1024L));
}
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();

    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }

    // A FileRange with an offset but no count must download from the offset to
    // the end of the file.
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver API: records every reported
// cumulative byte count so tests can assert the progression is monotonic and
// bounded by the file size.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
    // Cumulative progress values, in the order the transfer reported them.
    List<Long> progresses = new ArrayList<>();

    @Override
    public void reportProgress(long bytesTransferred) {
        progresses.add(bytesTransferred);
    }
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: captures each cumulative progress callback
// so tests can assert monotonic, bounded progress reporting.
private static final class MockProgressListener implements ProgressListener {
    // Progress values in callback order.
    List<Long> progresses = new ArrayList<>();

    @Override
    public void handleProgress(long bytesTransferred) {
        this.progresses.add(bytesTransferred);
    }
}
@Test
public void renameMin() {
    // A bare rename (no destination file system, no access conditions) must
    // return 201 Created.
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, generatePathName(), null, null, null), 201);
}
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem =
        primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    // Guard against a null block() result before dereferencing.
    assertNotNull(newFileSystem);

    // Rename the file into the freshly created file system and confirm the
    // destination path is reachable.
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        // BUGFIX: assertEquals takes (expected, actual); the arguments were reversed.
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();

    // The source path must no longer exist after the rename.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
    // SAS with every permission needed for a rename (read/move/write/create/add/delete).
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client =
        getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());

    DataLakeFileAsyncClient destClient =
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // Guard against a null block() result before dereferencing.
    assertNotNull(destClient);

    // BUGFIX: assertEquals takes (expected, actual); the arguments were reversed.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    // Same as renameSasToken, but the SAS is prefixed with '?', which the client
    // must tolerate when parsing the token.
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client =
        getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());

    DataLakeFileAsyncClient destClient =
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // Guard against a null block() result before dereferencing.
    assertNotNull(destClient);

    // BUGFIX: assertEquals takes (expected, actual); the arguments were reversed.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void appendDataMin() {
    // A minimal append of the default payload at offset 0 must complete cleanly.
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Invalid append inputs: (body flux, declared length, expected exception type).
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long declaredSize = DATA.getDefaultDataSizeLong();
    return Stream.of(
        // A null body is rejected before any request is made.
        Arguments.of(null, declaredSize, NullPointerException.class),
        // Declared length longer than the actual data.
        Arguments.of(DATA.getDefaultFlux(), declaredSize + 1, UnexpectedLengthException.class),
        // Declared length shorter than the actual data.
        Arguments.of(DATA.getDefaultFlux(), declaredSize - 1, UnexpectedLengthException.class));
}
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard for tests that rely on features introduced in the
// 2020-08-04 service version.
private static boolean olderThan20200804ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Same as appendBinaryData but with setFlush(true); only checks response headers.
// Requires 2019-12-12+ service version.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Minimal smoke test: flushing after a single append does not throw.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Verifies flushWithResponse with close=true (last positional arg) succeeds on a fresh file.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Verifies flushWithResponse with retainUncommittedData=true succeeds on a fresh file.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Negative test: flushing at position 4 (not matching the appended length) is rejected by the
// service with DataLakeStorageException.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// Verifies HTTP headers set at flush time are reflected by getProperties. A null content type
// is normalized by the service to "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
// Effectively-final copy needed for use inside the lambda below.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Verifies flush succeeds (200) when every supplied access condition
// (lease / ETag match / modified-since window) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Negative counterpart of flushAC: flush must fail with DataLakeStorageException when the
// supplied access conditions (lease / ETag match / modified-since window) are NOT satisfied.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);

    // Use the long-valued size accessor for consistency with flushAC and every other flush test
    // in this file (this call previously used the int getDefaultDataSize()).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Negative test: flushing a path that was never created fails with DataLakeStorageException.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// Verifies that a second flush with overwrite=false fails once data has already been committed.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Verifies that getFileAsyncClient normalizes/decodes the supplied path (URL-encoded segments,
// special characters, non-ASCII) into the expected file path.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Verifies the builder rejects bearer-token (AAD) credentials over plain HTTP, since tokens
// must only be sent over TLS.
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
// Round-trips a random local file through uploadFromFile and readToFile and compares contents.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Upload sizes and optional block sizes exercised by uploadFromFile: tiny single-shot uploads
// through a 101 MB chunked upload with an explicit 4 MB block size.
private static Stream<Arguments> uploadFromFileSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(10, null));
    cases.add(Arguments.of(10 * Constants.KB, null));
    cases.add(Arguments.of(50 * Constants.MB, null));
    cases.add(Arguments.of(101 * Constants.MB, 4L * 1024 * 1024));
    return cases.build();
}
// Verifies metadata supplied to uploadFromFile is persisted and the uploaded bytes match the
// source file.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Verifies the default (no-overwrite) uploadFromFile fails with DataLakeStorageException when
// the destination already exists — both for the shared fc and for a freshly created file.
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Track the second temp file for cleanup as well; previously it was created inline and
    // leaked past the test run (never deleteOnExit'd nor added to createdFiles).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// Verifies uploadFromFile(path, overwrite=true) succeeds against destinations that already
// contain data.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Track the second temp file for cleanup as well; previously it was created inline and
    // leaked past the test run (never deleteOnExit'd nor added to createdFiles).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
 * number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
 * read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Most recent cumulative byte count observed; overwritten on every progress callback.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Non-deprecated counterpart of FileUploadReporter: records the latest cumulative byte count
// delivered through the ProgressListener callback.
private static final class FileUploadListener implements ProgressListener {
// Most recent cumulative byte count observed; overwritten on every progress callback.
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Verifies the deprecated ProgressReceiver reports the full byte count for a chunked
// uploadFromFile (single-upload path disabled via maxSingleUploadSize = blockSize - 1).
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// (total size, block size, buffer count) combinations for the progress-reporting upload tests.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8));
    cases.add(Arguments.of(20 * Constants.MB, (long) Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
    return cases.build();
}
// ProgressListener variant of uploadFromFileReporter: the listener must observe the full byte
// count for a chunked uploadFromFile.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Verifies uploadFromFile honors singleUploadSize/blockSize combinations and the resulting
// file has the expected size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Cases where the data exceeds the single-upload threshold, with and without an explicit
// block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(100, 50L, null));
    cases.add(Arguments.of(100, 50L, 20L));
    return cases.build();
}
// Response-returning variant of uploadFromFileOptions: checks the 200 status, ETag and
// last-modified on the response, then the resulting file size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Negative test: buffered upload of a single empty buffer fails with DataLakeStorageException.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
// Verifies empty buffers interleaved with non-empty ones are skipped cleanly: the download
// must equal the concatenation of the non-empty buffers only.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName())
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Each case supplies three buffers (possibly empty) plus the bytes the concatenated upload
// should produce when downloaded.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);

    return Stream.of(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
// Round-trips random data through buffered upload with varying block sizes / concurrency, then
// verifies the downloaded bytes match (skipped for the very large case to bound test time).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Only verify content for uploads under 100 MB to keep the test runtime bounded.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// (data size, block size, buffer count) combinations for the buffered-upload round-trip test.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5));
    cases.add(Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2));
    cases.add(Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, (long) Constants.MB, 10));
    cases.add(Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4));
    cases.add(Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));
    return cases.build();
}
// Asserts that `result` is exactly the concatenation of `buffers`: walks the result with a
// sliding [position, limit) window per source buffer and requires nothing left over at the end.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    buffers.forEach(expected -> {
        expected.position(0);
        result.limit(result.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, result);
        result.position(result.position() + expected.remaining());
    });
    assertEquals(0, result.remaining());
}
// Counts progress callbacks for buffered (non-file) uploads, asserting each report lands on a
// block-size boundary. Compare with FileUploadReporter, which tracks cumulative bytes instead.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
// Number of progress callbacks received.
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Non-deprecated counterpart of Reporter using ProgressListener: counts callbacks and asserts
// block-size alignment.
private static final class Listener implements ProgressListener {
private final long blockSize;
// Number of progress callbacks received.
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Verifies the deprecated ProgressReceiver fires at least once per uploaded block during a
// buffered upload.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// (total size, block size, buffer count) combinations for the buffered-upload progress tests.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8));
    cases.add(Arguments.of(20 * Constants.MB, (long) Constants.MB, 5));
    cases.add(Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2));
    cases.add(Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20));
    return cases.build();
}
// ProgressListener variant of bufferedUploadWithReporter.
// NOTE(review): the @EnabledIf condition string appears truncated in this view — verify it
// references the intended DataLakeTestBase condition method.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Verifies buffered upload re-chunks an arbitrarily-partitioned source flux (buffer sizes that
// don't align with the block size) and the download equals the concatenated input.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// (chunk sizes in MB, block size in MB, buffer count): chunks smaller than, equal to, and
// larger than the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(Arrays.asList(7, 7), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(10, 10), 10L, 2));
    cases.add(Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
    return cases.build();
}
// Verifies the single-shot vs chunked upload decision (driven by maxSingleUploadSize) yields
// correct content for totals below, at, and above the 4 MB threshold.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing but with a hot (publish().autoConnect()) source, which
// cannot be re-subscribed — exercises the upload path's single-subscription handling.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Buffer-size partitions straddling the 4 MB single-upload threshold: under, just over,
// exactly double, and exactly at the boundary.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    Stream.Builder<List<Integer>> cases = Stream.builder();
    cases.add(Arrays.asList(10, 100, 1000, 10000));
    cases.add(Arrays.asList(4 * Constants.MB + 1, 10));
    cases.add(Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    cases.add(Collections.singletonList(4 * Constants.MB));
    return cases.build();
}
// Hot-flux pathing test with injected transient failures: the retried upload must still
// produce the exact concatenated content.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same partitions as bufferedUploadHandlePathingSupplier minus the single-buffer boundary case.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    Stream.Builder<List<Integer>> cases = Stream.builder();
    cases.add(Arrays.asList(10, 100, 1000, 10000));
    cases.add(Arrays.asList(4 * Constants.MB + 1, 10));
    cases.add(Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.build();
}
// InputStream-options variant of the transient-failure pathing test, at sizes below and above
// the 2 MB single-upload threshold.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// Negative test: a null source flux is rejected with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Verifies headers (including optional Content-MD5 computed client-side) supplied to a
// buffered upload are reflected by getProperties.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Header combinations at sizes below and above the single-upload threshold; the boolean
// toggles client-side Content-MD5 computation.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    Stream.Builder<Arguments> cases = Stream.builder();
    cases.add(Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null));
    cases.add(Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"));
    cases.add(Arguments.of(6 * Constants.MB, null, null, null, null, false, null));
    cases.add(Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"));
    return cases.build();
}
// Verifies metadata supplied to a buffered upload is persisted; null key pairs are omitted.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies the number of append calls issued for a given data/singleUpload/block size
// combination by intercepting appendWithResponse via an anonymous subclass.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Hand-rolled spy: counts append calls, then delegates to the real implementation.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Verifies a buffered upload with POSIX permissions/umask options succeeds and writes the
// expected number of bytes.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Verifies buffered upload succeeds (200) when all supplied access conditions are satisfied.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Negative counterpart of bufferedUploadAC: unsatisfied access conditions must fail with a
// 412 DataLakeStorageException.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Verifies the buffer pool does not deadlock when an upload fails (garbage lease id forces
// DataLakeStorageException) with multiple in-flight buffers.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    // A second buffered upload without the overwrite flag must be rejected.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    client.upload(DATA.getDefaultFlux(), null).block();

    StepVerifier.create(client.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    // Upload over an existing file twice with overwrite=true; both must succeed.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);

    // Fix: the original wrapped a Mono in assertDoesNotThrow without subscribing,
    // so the first upload never actually executed. Block to run it.
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());

    // Fix: track the second temp file for cleanup instead of leaking it.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);

    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
@Test
// Round-trip a non-markable file-backed Flux through upload/readToFile and compare bytes.
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
@Test
// Upload from an InputStream without an explicit length; content must round-trip.
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
// A declared length that does not match the stream's actual size must fail.
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Lengths that are zero, negative, or off-by-one from the real data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
@Test
// A transient failure injected into the pipeline must be retried transparently.
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
// Upload BinaryData via options; content must round-trip.
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
// Upload BinaryData with overwrite=true over the existing file created in setup().
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
// Encryption context supplied at upload must be readable back from properties.
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
/**
 * Builds a CSV payload (optional 4-column header plus {@code numCopies} copies of a fixed
 * two-row body) using the separators from {@code s}, and uploads it to {@code fc} via
 * create/append/flush.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String sep = Character.toString(s.getColumnSeparator());
    byte[] headerBytes =
        ("rn1" + sep + "rn2" + sep + "rn3" + sep + "rn4" + s.getRecordSeparator()).getBytes();
    byte[] rowBytes = ("100" + sep + "200" + sep + "300" + sep + "400" + s.getRecordSeparator()
        + "300" + sep + "400" + sep + "500" + sep + "600" + s.getRecordSeparator()).getBytes();

    int headerLength = s.isHeadersPresent() ? headerBytes.length : 0;
    byte[] data = new byte[headerLength + rowBytes.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headerBytes, 0, data, 0, headerBytes.length);
    }
    for (int copy = 0; copy < numCopies; copy++) {
        System.arraycopy(rowBytes, 0, data, headerLength + copy * rowBytes.length, rowBytes.length);
    }

    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Uploads a small JSON-like object with numCopies "nameN": "ownerN" entries to fc.
// NOTE(review): every entry — including the last — is followed by a comma, producing a
// trailing comma before '}'. This looks intentional for the query tests; confirm the
// service tolerates it before "fixing".
private void uploadSmallJson(int numCopies) {
StringBuilder b = new StringBuilder();
b.append("{\n");
for (int i = 0; i < numCopies; i++) {
b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
}
b.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(b.toString()), 0).block();
fc.flush(b.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
// A SELECT * query must return exactly the bytes that a plain read returns.
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";

    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();

    liveTestScenarioWithRetry(() -> {
        // Fix: collect the query output with FluxUtil instead of reducing into an
        // OutputStream via ByteBuffer.array() — array() returns the whole backing
        // array and ignores the buffer's position/limit, so it can copy stale bytes.
        byte[] queryArray = FluxUtil.collectBytesInByteBufferStream(fc.query(expression)).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
// Query with various record/column separators; output must match the raw read, minus the
// header when headers are present on input but stripped on output.
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Separator/header combinations exercised above.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Escape char and field quote set on both input and output serialization.
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
// JSON input/output query; the expected read is the raw content plus a trailing '\n' (10).
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, JSON out: the first record must be rewritten as a JSON object.
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// JSON in, CSV out: two entries must flatten to a single comma-separated row.
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, Arrow out: only asserts the call does not throw (Arrow content is not decoded).
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
    // A mismatched column separator makes individual records fail without aborting the query;
    // the error consumer must observe at least one non-fatal error.
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";

    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver = new MockErrorReceiver("InvalidColumnOrdinal");
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver);

        assertDoesNotThrow(() -> fc.queryWithResponse(options).block().getValue().blockLast());
        assertTrue(receiver.numErrors > 0);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Declaring JSON input for CSV data is a fatal error surfaced when the value flux is consumed.
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Progress consumer must eventually report the full file size as bytes scanned.
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
// NOTE(review): the @EnabledIf value below appears truncated in this source — confirm against VCS.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Over many records, reported progress must be monotonically non-decreasing.
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
// An unrecognized serialization type on either side must be rejected client-side.
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Arrow is output-only; using it as input serialization must fail.
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Gate for tests that need service version 2020-10-02 or newer.
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
// Parquet is input-only; using it as output serialization must fail.
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Querying a file that was never created must fail with a storage exception.
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Query with satisfied access conditions must succeed.
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the scenario once in playback/record mode, or up to 5 times in live mode to
 * absorb transient service failures, sleeping 5s between attempts.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            // Remember the failure and retry after a pause.
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    // Fix: the original swallowed the exception even after the final attempt, letting a
    // consistently failing scenario pass silently. Rethrow so the test actually fails.
    throw lastFailure;
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Query with failing access conditions must be rejected by the service.
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
// Schedule deletion options must toggle whether an expiry time is set on the file.
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// Gate for tests that need service version 2019-12-12 or newer.
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// An absolute expiry time must round-trip (truncated to seconds by the service).
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
// Scheduling deletion on a file that was never created must fail.
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Records every bytes-scanned progress value reported by the query.
// NOTE(review): callbacks may arrive on reactor I/O threads; the list is only read after
// block()/blockLast(), which establishes the needed happens-before — confirm if usage changes.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Asserts every received query error is non-fatal and of the expected type, counting them.
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
// Serialization type unknown to the client, used to trigger IllegalArgumentException paths.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
    // Default upload must not overwrite the file created in setup().
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
    // With overwrite=true the upload succeeds and the content round-trips.
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();

    byte[] downloaded = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded);
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf value below appears truncated in this source — confirm against VCS.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// 20 MB upload split into 1 MB chunks must succeed.
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf value below appears truncated in this source — confirm against VCS.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
// Counts appendWithResponse invocations via an anonymous subclass and checks the chunking math.
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Data sizes vs. expected append counts, including the 100 MB single-shot boundary.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
// Upload must return a response whose value carries an ETag.
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
// A per-call policy pinning x-ms-version must be reflected in response headers.
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} |
Use `FluxUtil.collectBytesInByteBufferStream` instead of writing to the OutputStream | public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
} | readData.write(piece.array()); | public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
// Fresh file per test; fc is the shared client most tests operate on.
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
// Best-effort deletion of temp files registered during the test.
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
// Create with all-null options must return 201 and standard headers.
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
// An unmatched If-Match condition must fail the create.
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
// create(false) over an existing file must fail.
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
// HTTP headers supplied at create must round-trip; null content type falls back to the default.
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Metadata supplied at create must be returned unchanged by getProperties.
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Gate for tests that need service version 2021-04-10 or newer.
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
// Verifies that an encryption context supplied at file creation is surfaced by
// getProperties, readWithResponse, and listPaths. Requires service version 2021-04-10+.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // BUG FIX: this chain previously had no terminal verifyComplete(), so StepVerifier never
    // subscribed and the read-path assertion never ran.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // Recursive listing emits the directory first (skipped) and then the file under test.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// Verifies create succeeds (201) when each supported access condition
// (modified-since, unmodified-since, if-match, if-none-match, lease) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Each row exercises exactly one passing access condition; all-null row is the unconditional case.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
// Verifies create fails with DataLakeStorageException when each access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Each row exercises exactly one failing access condition.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
// Verifies create accepts POSIX permissions and umask strings.
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
// Verifies an ACL supplied via DataLakePathCreateOptions is applied at creation.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
// Only the first two entries (user, group) are asserted; the service may normalize the rest.
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Verifies owner and group supplied via create options round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Verifies that creating a file without explicit owner/group defaults both to "$superuser".
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: the returned Mono was never subscribed, so the create request was never sent
    // and the subsequent getAccessControl ran against a file from earlier setup (or none at all).
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// Verifies create succeeds (201) with a full set of HTTP headers in the options bag.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType)
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Verifies metadata supplied in the options bag is a subset of what getProperties returns.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
// Containment check (not equality): the service may add its own metadata entries.
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Verifies permissions "0777" masked with umask "0057" yield effective "rwx-w----".
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Verifies a proposed lease id with a duration is accepted at creation.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Verifies a proposed lease id WITHOUT a duration is rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies a 15-second lease taken at creation shows up as LOCKED/LEASED/FIXED in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Verifies create accepts an absolute expiry time (or no schedule-deletion options at all).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an absolute expiry one day out, plus null for the no-options case.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// Verifies a relative time-to-expire is converted to creationTime + duration by the service.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Verifies createIfNotExists creates a fresh file.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Verifies createIfNotExistsWithResponse returns 201 with standard headers for a new path.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Verifies createIfNotExists does NOT overwrite: first call 201, second call 409 Conflict.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// Verifies the file exists after createIfNotExists.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// createIfNotExists variant of createHeaders: headers round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
// createIfNotExists variant of createMetadata: metadata map round-trips exactly.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// createIfNotExists variant of createPermissionsAndUmask.
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// createIfNotExists variant of createEncryptionContext: the encryption context supplied at
// creation must be visible via getProperties, readWithResponse, and listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    // BUG FIX: this chain previously had no terminal verifyComplete(), so StepVerifier never
    // subscribed and the read-path assertion never ran.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // Recursive listing emits the directory first (skipped) and then the file under test.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// createIfNotExists variant of createOptionsWithACL.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
// Only the first two entries (user, group) are asserted; the service may normalize the rest.
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// createIfNotExists variant of createOptionsWithOwnerAndGroup.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// createIfNotExists variant: explicit null owner/group defaults both to "$superuser".
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// createIfNotExists variant of createOptionsWithPathHttpHeaders.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// createIfNotExists variant of createOptionsWithMetadata (containment check, not equality).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// createIfNotExists variant of createOptionsWithPermissionsAndUmask.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// createIfNotExists variant of createOptionsWithLeaseId.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// createIfNotExists variant: a proposed lease id without a duration is rejected.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// createIfNotExists variant of createOptionsWithLeaseDuration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// createIfNotExists variant of createOptionsWithTimeExpiresOn.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// createIfNotExists variant of createOptionsWithTimeToExpireRelativeToNow.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Verifies a basic delete returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// Verifies the file is gone after delete: getProperties yields 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Verifies delete succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Verifies delete fails when each access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// Verifies deleteIfExists emits true for an existing file.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// Verifies deleteIfExistsWithResponse returns 200 for an existing file.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// Verifies the file is gone after deleteIfExists.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Verifies deleteIfExists on a missing file reports 404 in the response rather than throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// Verifies deleteIfExists succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// Verifies deleteIfExists fails when each access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies setPermissions returns updated ETag and last-modified metadata.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// Verifies the WithResponse overload returns 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// Verifies setPermissions succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// Verifies setPermissions fails when each access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// Verifies setPermissions on a nonexistent file fails.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies setAccessControlList returns updated ETag and last-modified metadata.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// Verifies the WithResponse overload returns 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// Verifies setAccessControlList succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// Verifies setAccessControlList fails when each access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// Verifies setAccessControlList on a nonexistent file fails.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-02-10
// (the version that introduced recursive ACL operations).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: exactly one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file: same counter expectations as setACLRecursive.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal on a single file, covering mask/default/named-user/named-group entries.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Verifies getAccessControl populates ACL, permissions, owner, and group.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// Verifies the WithResponse overload (UPN resolution off) returns 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// Verifies the WithResponse overload with UPN resolution on returns 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// Verifies getAccessControl succeeds (200) when each access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// Verifies getAccessControl fails when each access condition is violated.
// The garbage-lease row is skipped: the getAccessControl endpoint does not reject a
// mismatched lease id, so that case cannot fail here.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// Smoke test: a bare properties request against an existing file returns 200.
@Test
public void getPropertiesMin() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// Access conditions that all match the current path state must allow the
// properties request (200).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}

// A single failing access condition must reject the properties request.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// Requesting properties of a path that was never created must surface a
// storage error whose message names the BlobNotFound error code.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(error.getMessage().contains("BlobNotFound"));
        });
}
// Setting null HTTP headers is a valid no-op-style request: it succeeds (200)
// and returns the standard response headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Changing only Content-Type while echoing back the current values of the
// other headers must persist the new Content-Type.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    // NOTE(review): block() may return null on an empty Mono; properties is
    // dereferenced immediately below — acceptable in a test, NPE would fail it.
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Round-trips each HTTP header combination from the supplier through
// setHttpHeaders and validates them via getPropertiesWithResponse.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // FIX(review): append/flush return cold Monos; without block() nothing was
    // sent to the service, so the file had no committed content. Every sibling
    // test in this file blocks these calls.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Two cases: all-null headers (clears them) and a fully populated set with a
// valid MD5 of the default payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language",
            Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
    );
}
// Matching access conditions must allow setHttpHeaders (200).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}

// A failing access condition must reject setHttpHeaders.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Acquire a lease but deliberately send the raw (mismatched) lease id.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Setting headers on a path that does not exist must fail with a storage error.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyErrorSatisfies(e -> assertInstanceOf(DataLakeStorageException.class, e));
}
// A single metadata entry set via setMetadata must be readable via getProperties.
@Test
public void setMetadataMin() {
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Sets zero, one, or two metadata key/value pairs and verifies both the
// response status code and that getProperties echoes the exact map back.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Matching access conditions must allow setMetadata (200).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}

// A failing access condition must reject setMetadata.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Acquire a lease but deliberately send the raw (mismatched) lease id.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

// Setting metadata on a nonexistent path must fail.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Reads the whole file with default options and checks the full set of
// response headers expected for a plain, unleased, uncopied file.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> {
            // NOTE(review): this inner subscribe is fire-and-forget — if the
            // payload assertion failed after the verifier completed, it might
            // not fail the test. Confirm whether collecting the flux first is
            // the intended pattern.
            r.getValue().subscribe(piece -> {
                TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
            });
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
        })
        .verifyComplete();
}
// Reading a zero-length file yields a single empty buffer.
// NOTE(review): hard-coded path "emptyFile" rather than generatePathName() —
// confirm this cannot collide across concurrent test runs.
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
// Uses a mock policy that forces a retry to carry the range "bytes=2-6" and
// then breaks the stream; the body flux must surface an IOException rather
// than silently returning wrong data.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
        .assertNext(r -> {
            StepVerifier.create(r.getValue())
                .verifyErrorSatisfies(p -> {
                    assertInstanceOf(IOException.class, p);
                });
        })
        .verifyComplete();
}
// Round-trip smoke test: write the default payload, then read it back whole.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0)
        .then(fc.flush(DATA.getDefaultDataSizeLong(), true))
        .block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Supplier of (offset, count, expected substring) cases for ranged reads.
// FIX(review): @ParameterizedTest/@MethodSource were attached to this private
// static supplier method — JUnit 5 rejects that (test methods must not be
// private/static), and a supplier is not a test. The readRange test method
// this supplier feeds appears to be missing from the file; restore it from
// version control rather than re-adding the annotations here.
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        Arguments.of(0L, null, DATA.getDefaultText()),
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}
// Matching access conditions must allow a full read (200).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}

// A failing access condition must reject the read.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Acquire a lease but deliberately send the raw (mismatched) lease id.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
// Requesting rangeGetContentMd5=true for a small range must return a
// Content-MD5 header equal to the base64 MD5 of that range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            // NOTE(review): getBytes() uses the platform charset; header values
            // are base64/ASCII so this is effectively safe, but
            // StandardCharsets.US_ASCII would make the intent explicit.
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// With a policy that injects 5 failures, the client's default retry behavior
// must still deliver the complete payload.
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> {
            try {
                downloadData.write(r);
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            assertEquals(DATA.getDefaultText(), downloadData.toString());
        })
        .verifyComplete();
}
// readToFile without the overwrite flag must fail with
// FileAlreadyExistsException when the destination file already exists.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // FIX(review): append/flush are cold Monos and were never subscribed —
    // missing block() meant no content was ever written. Every sibling test
    // in this file blocks these calls.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true must succeed over an existing destination
// and write the exact payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}

// readToFile must create the destination when it does not yet exist.
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// Explicit OpenOptions (CREATE/READ/WRITE) must be honored when the
// destination already exists but is empty.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}

// TRUNCATE_EXISTING in the OpenOptions must replace any prior contents.
@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads various sub-ranges to a file and compares only the requested
// slice against the uploaded source.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
        null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}

// Ranges: full file, all-but-first byte, small interior slice, all-but-last
// byte, and a count larger than the file (clamped by the service).
private static Stream<FileRange> downloadFileRangeSupplier() {
    return Stream.of(
        new FileRange(0, DATA.getDefaultDataSizeLong()),
        new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(3, 2L),
        new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(0, 10 * 1024L)
    );
}
// A range whose offset is past the end of the file must be rejected by the
// service with a storage error.
@Test
public void downloadFileRangeFail() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
        new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
        null, false, null))
        .verifyError(DataLakeStorageException.class);
}
// A range with an offset but no count must download through to the end of
// the file.
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
// Matching access conditions must allow readToFile to complete without error.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setLeaseId(setupPathLeaseCondition(fc, leaseID));
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, bro, false, null).block());
}
// A failing access condition must reject readToFile with ConditionNotMet or
// a lease-mismatch error code.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    // Acquire a lease but deliberately send the raw (mismatched) lease id.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setLeaseId(leaseID);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
        null, bro, false, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
                "LeaseIdMismatchWithBlobOperation"));
        });
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// A minimal rename (same file system, fresh destination name) returns 201.
@Test
public void renameMin() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .assertNext(response -> assertEquals(201, response.getStatusCode()))
        .verifyComplete();
}
// After rename, the destination client returned in the response must resolve
// (200 on properties) and the source path must no longer exist.
@Test
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
        null, null, null))
        .assertNext(r -> {
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(p.getStatusCode(), 200))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}

// Same as above, but the destination lives in a different file system.
@Test
public void renameFilesystemWithResponse() {
    // NOTE(review): block() may return null; newFileSystem is dereferenced
    // immediately — an NPE here would fail the test, which is acceptable.
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null))
        .assertNext(r -> {
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(p.getStatusCode(), 200))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a path that was never created must fail with a storage error.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .verifyErrorSatisfies(e -> assertInstanceOf(DataLakeStorageException.class, e));
}
// Renames between names containing percent-encoded characters and verifies
// the destination resolves (201 on rename, 200 on its properties).
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
        null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // FIX(review): the original built a flatMap whose mapper returned
            // null (illegal in Reactor) and never subscribed to it, so the
            // inner 200 assertion never actually ran. Verify eagerly instead.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
// Matching access conditions on the SOURCE path must allow rename (201).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
        null, null), 201);
}

// A failing SOURCE access condition must reject rename.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block()
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
        null, null))
        .verifyError(DataLakeStorageException.class);
}

// Matching access conditions on the DESTINATION path must allow rename (201).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(destFile, leaseID))
        .setIfMatch(setupPathMatchCondition(destFile, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
        drc, null), 201);
}

// A failing DESTINATION access condition must reject rename.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    // Acquire a lease but deliberately send the raw (mismatched) lease id.
    setupPathLeaseCondition(destFile, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken, but the SAS is passed with a leading '?' to verify the client
// tolerates query-string-prefixed tokens.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client =
        getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient =
        client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // FIX: assertEquals arguments were swapped; expected value (200) first.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Smoke test: the simplest append — default payload at offset zero — must not throw.
@Test
public void appendDataMin() {
    assertDoesNotThrow(fc.append(DATA.getDefaultBinaryData(), 0)::block);
}
// Appends the default payload and verifies the 202 status plus the standard service response
// headers (request id, version, date, server-side encryption flag).
@Test
public void appendData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Appends with a client-computed transactional MD5 so the service validates content integrity.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    // FIX: specify UTF-8 explicitly — the bare getBytes() used the platform default charset,
    // which could produce a different MD5 (and a spurious failure) on non-UTF-8 platforms.
    byte[] md5 = MessageDigest.getInstance("MD5")
        .digest(DATA.getDefaultText().getBytes(StandardCharsets.UTF_8));
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders headers = response.getHeaders();
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Each malformed (stream, declared size) pair must surface the documented exception type.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
    StepVerifier.create(fc.append(is, 0, dataSize)).verifyError(exceptionType);
}
// Cases: null stream -> NPE; declared size off by one in either direction -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long declaredSize = DATA.getDefaultDataSizeLong();
    Arguments[] cases = {
        Arguments.of(null, declaredSize, NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), declaredSize + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), declaredSize - 1, UnexpectedLengthException.class)
    };
    return Stream.of(cases);
}
// A zero-length append is rejected by the service.
@Test
public void appendDataEmptyBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    BinaryData emptyBody = BinaryData.fromBytes(new byte[0]);
    StepVerifier.create(fc.append(emptyBody, 0)).verifyError(DataLakeStorageException.class);
}
// A null body is rejected client-side with a NullPointerException.
@Test
public void appendDataNullBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(null, 0, 0)).verifyError(NullPointerException.class);
}
// An append presenting the lease id it just acquired is accepted (202).
@Test
public void appendDataLease() {
    String leaseId = setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    assertAsyncResponseStatusCode(
        fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, leaseId), 202);
}
// Appending with the wrong lease id against a leased file fails with 412 (precondition failed).
@Test
public void appendDataLeaseFail() {
    setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
        .verifyErrorSatisfies(throwable -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, throwable);
            assertEquals(412, ex.getResponse().getStatusCode());
        });
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2020-08-04
// (the version that introduced lease actions on append). Must stay static and parameterless
// so the annotation's method lookup resolves it.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE: the append atomically takes a new fixed-duration lease on the file.
// Afterwards the file must report LOCKED / LEASED / FIXED. Requires service >= 2020-08-04.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
;
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW: appending with an existing lease id renews that lease as part of the
// append; the file must remain LOCKED / LEASED / FIXED afterwards. Requires service >= 2020-08-04.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
// Acquire a real 15s lease up front so there is something to renew.
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE: the append (with flush) releases the held lease on completion; the file
// must report UNLOCKED / AVAILABLE afterwards. setFlush(true) is required so the release is applied.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
// Acquire a real 15s lease up front so there is something to release.
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE: the append acquires a lease for the duration of the operation and
// releases it on completion (flush required), leaving the file UNLOCKED / AVAILABLE.
// Requires service >= 2020-08-04.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created must fail with 404.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(throwable -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, throwable);
            assertEquals(404, ex.getResponse().getStatusCode());
        });
}
// Exercises the pipeline's retry of appends: TransientFailureInjectingHttpPipelinePolicy
// presumably injects a transient failure on the first attempt (TODO confirm against the policy),
// and the retried append must still land the full payload, verified via flush + read-back.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// setFlush(true) commits the appended data as part of the append call itself, so the content is
// readable without a separate flush. Gated on a 2019-12-12+ service version.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
// No explicit flush here: the data must already be committed by the append above.
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Smoke test for the BinaryData overload: appending the default payload must not throw.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(fc.append(DATA.getDefaultBinaryData(), 0)::block);
}
// Appends via the BinaryData overload and verifies the 202 status plus standard response headers.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// BinaryData overload with flush-on-append (2019-12-12+): the append both stages and commits.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions flushOnAppend = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, flushOnAppend))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Flushing exactly the appended length with overwrite=true must succeed.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(fc.flush(DATA.getDefaultDataSizeLong(), true)::block);
}
// Flush with close=true (retainUncommittedData=false): closing the file on flush must not throw.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().then(fc.append(DATA.getDefaultBinaryData(), 0)).block();
    assertDoesNotThrow(() ->
        fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, true, null, null).block());
}
// Flush with retainUncommittedData=true (close=false) must succeed without throwing.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().then(fc.append(DATA.getDefaultBinaryData(), 0)).block();
    assertDoesNotThrow(() ->
        fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true, false, null, null).block());
}
// Flushing a position (4) that does not match the uncommitted data length is rejected.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().then(fc.append(DATA.getDefaultBinaryData(), 0)).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null, null))
        .verifyError(DataLakeStorageException.class);
}
// Verifies HTTP content headers set at flush time round-trip through getProperties.
// NOTE(review): this @CsvSource declares no nullValues, so by JUnit defaults the literal string
// "null" (not Java null) may reach these parameters — confirm against the test base configuration.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
// The service defaults an unset content type to application/octet-stream.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all supplied access conditions match the live file. The lease is
// set up before the match condition because setupPathMatchCondition must observe the file's
// final etag — keep this ordering.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush fails with DataLakeStorageException when the access conditions do not match the live file.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        // Resolves the sentinel to the file's actual etag, which must NOT satisfy if-none-match.
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // CONSISTENCY: use getDefaultDataSizeLong() like every sibling flush test (the original used
    // the int-returning getDefaultDataSize(); behavior is identical via widening).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a file that does not exist must fail.
@Test
public void flushError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true)).verifyError(DataLakeStorageException.class);
}
// Once data is committed, a second flush without overwrite must be rejected.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(fc.flush(DATA.getDefaultDataSizeLong(), true)::block);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// The client must URL-decode the supplied name into the canonical file path.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    String actualPath =
        dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName).getFilePath();
    assertEquals(finalFileName, actualPath);
}
// Bearer (AAD) credentials require HTTPS; building a client against an http endpoint must fail.
@Test
public void builderBearerTokenValidation() {
    String httpEndpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    DataLakePathClientBuilder builder = new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(httpEndpoint);
    assertThrows(IllegalArgumentException.class, builder::buildFileAsyncClient);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// (file size, block size); a null block size exercises the library's default chunking.
private static Stream<Arguments> uploadFromFileSupplier() {
    Arguments[] cases = {
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
    };
    return Stream.of(cases);
}
// Metadata supplied at upload time must round-trip, and the content must match the local file.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(metadata, props.getMetadata()))
        .verifyComplete();
    // Read the expected bytes up front; the download must match them exactly.
    byte[] expectedBytes = Files.readAllBytes(file.toPath());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(actualBytes -> TestUtils.assertArraysEqual(expectedBytes, actualBytes))
        .verifyComplete();
}
// With the default (no-overwrite) overload, uploading over an existing file must fail for both
// an already-populated client (fc) and a freshly created destination (fac).
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // FIX: register the second temp file for cleanup too — the original created it inline and
    // never added it to deleteOnExit/createdFiles, leaking it on disk.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// With overwrite=true, uploading over an existing file must succeed for both clients.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // FIX: register the second temp file for cleanup too — the original created it inline and
    // never added it to deleteOnExit/createdFiles, leaking it on disk.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Records the running byte count reported while uploading from a file. Unlike the other
 * reporters (which count callbacks), this tracks bytes because upload-from-file hooks into the
 * disk-read data stream, which uses a hard-coded read size.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
// ProgressListener counterpart of FileUploadReporter: records the latest cumulative byte count.
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// (total size, block size, concurrency) combinations for the progress-reporting upload tests.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    Arguments[] cases = {
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    };
    return Stream.of(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Uploads with explicit single-upload/block sizes; the committed size must equal the local file's.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxSingleUploadSizeLong(singleUploadSize);
    fc.uploadFromFile(file.toPath().toString(), transferOptions, null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(props -> assertEquals(dataSize, props.getFileSize()))
        .verifyComplete();
}
// (data size, max single-upload size, block size): covers single-shot and chunked uploads.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    Arguments[] cases = {
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    };
    return Stream.of(cases);
}
// Upload-from-file with full response: verifies the 200 status, etag/last-modified presence,
// and that the committed file size matches the local file.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // BUG FIX: the original created this StepVerifier but never subscribed (missing
    // verifyComplete()), so the file-size assertion never executed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Three-buffer sequences with an empty buffer rotated through each position, plus the all-non-empty case.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    Arguments[] cases = {
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8))
    };
    return Stream.of(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// (data size, block size, concurrency) matrix for the buffered-upload round-trip test.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    Arguments[] cases = {
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    };
    return Stream.of(cases);
}
// Asserts that `result` is exactly the concatenation of `buffers`, by sliding result's
// limit/position window over each expected buffer in turn. The position arithmetic is
// interleaved with the comparison call — the statement order here is load-bearing.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
// Constrain the comparison window to exactly this buffer's length.
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
// Advance past the compared window.
result.position(result.position() + buffer.remaining());
}
// Nothing may be left over once every expected buffer has been consumed.
assertEquals(0, result.remaining());
}
// Deprecated ProgressReceiver that counts callbacks, asserting each report is a whole-block multiple.
// NOTE: reportingCount is read directly by the tests, so its name must not change.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
// ProgressListener counterpart of Reporter: counts callbacks in whole-block multiples.
// NOTE: reportingCount is read directly by the tests, so its name must not change.
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// (total size, block size, concurrency) matrix for the buffered-upload progress tests.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Arguments[] cases = {
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    };
    return Stream.of(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists (in MB) with a 10 MB block size: under, over, and exactly aligned chunks.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Arguments[] cases = {
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    };
    return Stream.of(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists around the 4 MB single-shot threshold: tiny chunks, just-over, exact, and single.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
}
/**
 * Hot-flux buffered upload through a pipeline that injects transient failures; the retried
 * upload must still produce the exact original bytes when read back with a clean client.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    // Client whose pipeline deliberately fails requests once before succeeding.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // Read back with a client that has no failure injection.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
/**
 * Buffer-size permutations for the transient-failure hot-flux test (no single-buffer case,
 * unlike {@code bufferedUploadHandlePathingSupplier}).
 */
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<List<Integer>> sizeCases = new ArrayList<>();
    sizeCases.add(Arrays.asList(10, 100, 1000, 10000));
    sizeCases.add(Arrays.asList(4 * Constants.MB + 1, 10));
    sizeCases.add(Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return sizeCases.stream();
}
/**
 * Input-stream buffered upload through a transient-failure pipeline: both the single-shot
 * path (11110 bytes &lt; 2 MB threshold) and the chunked path (2 MB + 11) must survive retries.
 */
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
/**
 * Passing a null data Flux to {@code upload} must fail with {@link NullPointerException}.
 */
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    // Cast disambiguates the overload being exercised.
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
/**
 * HTTP headers supplied to a buffered upload must be persisted on the path; when
 * {@code validateContentMD5} is set, a locally computed MD5 is sent and verified.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        // Service defaults content type to application/octet-stream when none was supplied.
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
/**
 * Header permutations: (dataSize, cacheControl, contentDisposition, contentEncoding,
 * contentLanguage, validateContentMD5, contentType) — default and explicit headers, at
 * single-shot and multi-block sizes.
 */
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    List<Arguments> headerCases = new ArrayList<>();
    headerCases.add(Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null));
    headerCases.add(Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"));
    headerCases.add(Arguments.of(6 * Constants.MB, null, null, null, null, false, null));
    headerCases.add(Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"));
    return headerCases.stream();
}
/**
 * Metadata supplied to a buffered upload must be retrievable from the path afterwards;
 * null key pairs are skipped so the empty-metadata case is also covered.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    // Tiny block size forces the chunked (non single-shot) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
/**
 * Verifies that {@code singleUploadSize}/{@code blockSize} options produce exactly the
 * expected number of append calls, by counting invocations via an anonymous spy subclass.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Spy that counts appendWithResponse calls while delegating to the real client.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
/**
 * POSIX permissions and umask set via {@link FileParallelUploadOptions} must be accepted;
 * a subsequent properties call confirms the upload landed (size 10).
 */
@Test
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
/**
 * Buffered upload succeeds when every supplied access condition (lease, ETag match,
 * modified-since window) is satisfied.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // setupPath* helpers resolve sentinel values into real lease IDs / ETags.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
/**
 * Buffered upload must fail with HTTP 412 (precondition failed) when an access condition
 * is deliberately unsatisfied.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // Note: ifNoneMatch is set to the path's REAL ETag here, which guarantees the 412.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
/**
 * An invalid (garbage) lease ID must surface as a {@link DataLakeStorageException} even
 * when the upload is split across multiple pooled buffers.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
/**
 * By default {@code upload} must NOT overwrite an existing file: the second upload to the
 * same path fails with {@link IllegalArgumentException}.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
/**
 * Uploading from a file with {@code overwrite = true} must succeed on both the sync-path
 * client ({@code fc}) and a fresh async client.
 */
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
    // Fix: the second temp file was previously created inline and never registered for
    // cleanup, leaking a file on every run. Track it like the first one.
    File overwriteSource = getRandomFile(50);
    overwriteSource.deleteOnExit();
    createdFiles.add(overwriteSource);
    StepVerifier.create(fac.uploadFromFile(overwriteSource.toPath().toString(), true))
        .verifyComplete();
}
/**
 * Upload from a non-markable (non-replayable) file-backed Flux, then download to a second
 * file and compare contents byte-for-byte.
 */
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    // A file channel read is single-pass — exercises the non-replayable upload path.
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
/**
 * Upload from an InputStream without an explicit length must succeed and round-trip the
 * default test payload.
 */
@Test
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
/**
 * Upload must fail when the declared stream length disagrees with the actual data
 * (zero, negative, one short, one long).
 */
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
/**
 * Declared lengths that disagree with the default payload: zero, negative, one byte short,
 * and one byte long.
 */
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
/**
 * An upload through a pipeline that injects a transient failure must succeed on retry and
 * still produce the exact payload on read-back.
 */
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
/**
 * Upload from {@link BinaryData} must round-trip the default test payload.
 */
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
/**
 * {@link BinaryData} upload with {@code overwrite = true} must succeed over the existing
 * file and round-trip the payload.
 */
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
/**
 * An encryption context set at upload time must be returned by getProperties.
 * Requires service version 2021-04-10 or newer.
 */
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
/**
 * Writes a CSV payload to {@code fc}: an optional header row followed by {@code numCopies}
 * copies of two fixed data rows, using the separators configured on {@code s}.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    byte[] headers = header.getBytes();
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    // Header occupies the front of the buffer only when the serialization declares one.
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    // create → append → flush is the DataLake three-step write.
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
/**
 * Writes a small JSON object to {@code fc} with {@code numCopies} "nameN": "ownerN" entries.
 * Note: the last entry keeps its trailing comma before '}' (intentionally matches the
 * original payload shape used by the query tests).
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder();
    json.append("{\n");
    for (int i = 0; i < numCopies; i++) {
        json.append("\t\"name").append(i).append("\": \"owner").append(i).append("\",\n");
    }
    json.append('}');
    // create → append → flush is the DataLake three-step write.
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    fc.flush(json.length(), true).block();
}
/**
 * A pass-through query ({@code SELECT *}) over a headerless CSV must return exactly the
 * bytes that a plain read returns.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    32
})
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate the query output stream into a single byte array.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
/**
 * Pass-through query with matching input/output delimited serializations across a range of
 * record/column separators; when headers are present on input but stripped on output, the
 * 16-byte header row is accounted for in the comparison.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
/**
 * Separator permutations: (recordSeparator, columnSeparator, headersPresentIn, headersPresentOut).
 * The first three cases cover the header in/out combinations; the rest sweep unusual
 * record and column separator characters with headers disabled.
 */
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    List<Arguments> separatorCases = new ArrayList<>();
    separatorCases.add(Arguments.of('\n', ',', false, false));
    separatorCases.add(Arguments.of('\n', ',', true, true));
    separatorCases.add(Arguments.of('\n', ',', true, false));
    separatorCases.add(Arguments.of('\t', ',', false, false));
    separatorCases.add(Arguments.of('\r', ',', false, false));
    separatorCases.add(Arguments.of('<', ',', false, false));
    separatorCases.add(Arguments.of('>', ',', false, false));
    separatorCases.add(Arguments.of('&', ',', false, false));
    separatorCases.add(Arguments.of('\\', ',', false, false));
    separatorCases.add(Arguments.of(',', '.', false, false));
    separatorCases.add(Arguments.of(',', ';', false, false));
    separatorCases.add(Arguments.of('\n', '\t', false, false));
    separatorCases.add(Arguments.of('\n', '<', false, false));
    separatorCases.add(Arguments.of('\n', '>', false, false));
    separatorCases.add(Arguments.of('\n', '&', false, false));
    separatorCases.add(Arguments.of('\n', '\\', false, false));
    return separatorCases.stream();
}
/**
 * Pass-through query with escape char and field quote configured on both serializations
 * must still return the exact uploaded bytes.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
/**
 * Pass-through query over JSON input/output must equal a plain read plus one trailing
 * record separator (the service appends a separator after the final JSON record).
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // Append '\n' (byte 10) — query output carries a trailing record separator.
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
/**
 * JSON copy counts (0, 10, 100, 1000), each paired with a newline record separator.
 */
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(0, 10, 100, 1000).map(copies -> Arguments.of(copies, '\n'));
}
/**
 * Querying CSV input with JSON output serialization must emit the row as a JSON object
 * with positional keys ({@code _1}..{@code _4}).
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // Only the prefix is compared — output may carry trailing separator bytes.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
/**
 * Querying JSON input with delimited output serialization must flatten the object's values
 * into a single CSV row.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
/**
 * Query with Arrow output serialization and an explicit decimal schema must complete
 * without throwing; the Arrow payload itself is not validated here.
 */
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
    FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(inSer, 32);
    List<FileQueryArrowField> schema = Collections.singletonList(
        new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
    FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
/**
 * A column-ordinal mismatch (data uploaded with '.' separator, queried with ',') must be
 * reported as non-fatal errors through the error consumer, not as a thrown exception.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver2)).block().getValue().blockLast());
        assertTrue(receiver2.numErrors > 0);
    });
}
/**
 * Declaring JSON input serialization over CSV data is a fatal mismatch: the response
 * arrives, but consuming its value Flux must throw.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(true);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
            new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
            .assertNext(r -> {
                assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
            })
            .verifyComplete();
    });
}
/**
 * The progress consumer must eventually report the full file size as bytes scanned.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        // Drain the whole result stream so progress callbacks fire to completion.
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
/**
 * Over a large (512000-copy) payload, progress callbacks must be monotonically
 * non-decreasing.
 */
@DisabledIf("olderThan20191212ServiceVersion")
// NOTE(review): the @EnabledIf string below appears truncated in this extract — verify against the repo.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    String expression = "SELECT * from BlobStorage";
    uploadCsv(ser, 512000);
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        long temp = 0;
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        for (long progress : mockReceiver.progressList) {
            assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
            temp = progress;
        }
    });
}
/**
 * A serialization implementation the client does not recognize must be rejected with
 * {@link IllegalArgumentException}, whether supplied as input or output serialization.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    // Fix: removed a stale commented-out StepVerifier variant of the assertion below —
    // dead code that duplicated the live check.
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
/**
 * Arrow serialization is output-only: supplying it as INPUT serialization must fail with
 * {@link IllegalArgumentException}.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
/** Condition method for {@code @DisabledIf}: true when targeting a service older than 2020-10-02. */
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
/**
 * Parquet serialization is input-only: supplying it as OUTPUT serialization must fail with
 * {@link IllegalArgumentException}.
 */
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
/**
 * Querying a path that was never created must fail with {@link DataLakeStorageException}.
 * Note: this reassigns the shared {@code fc} field to a fresh, nonexistent path.
 */
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}
/**
 * Query succeeds when every supplied access condition (lease, ETag match, modified-since
 * window) is satisfied.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
/**
 * Runs {@code runnable} directly in playback/record mode; in live mode retries flaky
 * scenarios up to 5 times with a pause between attempts.
 *
 * <p>Fix: the original swallowed the exception from the final (5th) attempt, letting a
 * consistently failing scenario pass silently. The last failure is now rethrown. The catch
 * is narrowed to {@link RuntimeException}, which is all {@link Runnable#run()} can throw.
 *
 * @param runnable the test scenario to execute.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    int retry = 0;
    while (retry < 5) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            retry++;
            if (retry >= 5) {
                throw ex; // out of retries — surface the failure instead of passing silently
            }
            sleepIfRunningAgainstService(5000);
        }
    }
}
/**
 * Query must fail with {@link DataLakeStorageException} when an access condition is
 * deliberately unsatisfied (ifNoneMatch set to the path's real ETag, or a stale lease).
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    StepVerifier.create(fc.queryWithResponse(
        new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
        .verifyError(DataLakeStorageException.class);
}
/**
 * Scheduling deletion with the given options must set (or leave unset) the path's
 * expires-on property according to {@code hasExpiry}.
 */
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
/**
 * Deletion-scheduling cases: relative-to-creation and relative-to-now expiries (both set
 * an expiry), plus empty options and null options (neither sets one).
 */
private static Stream<Arguments> scheduleDeletionSupplier() {
    List<Arguments> deletionCases = new ArrayList<>();
    deletionCases.add(Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true));
    deletionCases.add(Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true));
    deletionCases.add(Arguments.of(new FileScheduleDeletionOptions(), false));
    deletionCases.add(Arguments.of(null, false));
    return deletionCases.stream();
}
/** Condition method for {@code @DisabledIf}: true when targeting a service older than 2019-12-12. */
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
/**
 * An absolute expiry time must round-trip through getProperties (service truncates to
 * whole seconds).
 */
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
    OffsetDateTime now = testResourceNamer.now();
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
/**
 * Test helper that records the running bytes-scanned value reported by each
 * {@code FileQueryProgress} callback into {@code progressList}.
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    List<Long> progressList = new ArrayList<>();

    @Override
    public void accept(FileQueryProgress progress) {
        long bytesScanned = progress.getBytesScanned();
        progressList.add(bytesScanned);
    }
}
/**
 * Test helper that asserts every reported {@code FileQueryError} is non-fatal and matches the
 * expected error name, while counting how many errors were delivered.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    String expectedType;
    int numErrors = 0;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors += 1;
    }
}
// Empty FileQuerySerialization implementation; presumably used by query tests to exercise the
// unsupported-serialization error path -- TODO confirm against the query test callers.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// Upload without the overwrite flag must fail when the file already exists (created in setup()).
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
// Upload with overwrite=true replaces the existing file; the read-back bytes must match.
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: data size, single-upload threshold, block size, expected number of append calls.
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    int justUnderLimit = (100 * Constants.MB) - 1;
    int justOverLimit = (100 * Constants.MB) + 1;
    // Over the single-shot limit the data is chunked into 4 MB appends.
    int appendsForOverLimit = (int) Math.ceil((double) justOverLimit / (double) (4 * Constants.MB));
    return Stream.of(
        Arguments.of(justUnderLimit, null, null, 1),
        Arguments.of(justOverLimit, null, null, appendsForOverLimit),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5)
    );
}
// A successful upload response must carry an ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call policy pinning x-ms-version to 2019-02-02 must be reflected on every response.
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} | class FileAsyncApiTests extends DataLakeTestBase {
// File client under test; created fresh in setup() and frequently reassigned per test.
private DataLakeFileAsyncClient fc;
// Local temp files created during tests; deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// Owner rwx, group r-x, other r-- permissions used by the setPermissions tests.
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
// Null group/owner leaves the service defaults in place.
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
// Creates a fresh file before each test.
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
// Deletes any local temp files a test registered in createdFiles.
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
// create() on a new path emits a non-null result.
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
// createWithResponse with all-null options returns 201 and standard headers.
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// An If-Match condition that cannot match must fail the create.
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
// create(overwrite=false) on an existing file must fail.
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
// exists() is true for a created file.
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// exists() is false for a never-created path.
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
// HTTP headers passed at create time must round-trip through getProperties; a null content type
// defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
// Metadata passed at create time must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Guard: encryption-context APIs require service version 2021-04-10 or newer.
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
// The encryption context set at create time must be visible via getProperties, read headers,
// and listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// First listed path is the directory created above; the second (the file) carries the context.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// Create succeeds (201) when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Satisfiable access conditions: each row sets at most one condition.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
// Create fails when any supplied access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Unsatisfiable access conditions: each row sets exactly one failing condition.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
// Octal permissions and umask are accepted at create time.
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// Guard: the DataLakePathCreateOptions extras (ACL, owner, lease, expiry) require 2020-12-06+.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
// An ACL supplied at create time must round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied at create time must round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// When no owner/group is supplied at create time, the service reports "$superuser" for both.
@Test
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: the original built the create Mono but never subscribed (missing .block()),
    // so the request was never actually sent before asserting.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// HTTP headers supplied via DataLakePathCreateOptions are accepted (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied via DataLakePathCreateOptions must appear in getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 must yield rwx-w---- on the created file.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id plus a duration is accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id without a duration must be rejected.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A fixed 15-second lease acquired at create time must show as locked/leased/fixed.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Schedule-deletion options (absolute time or null) are accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// A relative expiry of 6 days must land 6 days after the creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a new path results in an existing file.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options return 201 with standard headers.
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Second createIfNotExists on the same path returns 409 rather than overwriting.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// exists() is true after createIfNotExists.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// HTTP headers supplied to createIfNotExists must round-trip through getProperties; a null
// content type defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
// Metadata supplied to createIfNotExists must round-trip through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Octal permissions and umask are accepted by createIfNotExists.
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// Encryption context set via createIfNotExists must be visible via getProperties, read headers,
// and listPaths.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// First listed path is the directory created above; the second (the file) carries the context.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// An ACL supplied to createIfNotExists must round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied to createIfNotExists must round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// With null owner/group the service reports "$superuser" for both.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// HTTP headers supplied via options are accepted by createIfNotExists (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata supplied via options must appear in getProperties after createIfNotExists.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions 0777 masked by umask 0057 must yield rwx-w---- on the created file.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// A proposed lease id plus a duration is accepted by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id without a duration must be rejected.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A fixed 15-second lease acquired at create time must show as locked/leased/fixed.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Schedule-deletion options (absolute time or null) are accepted by createIfNotExists.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A relative expiry of 6 days must land 6 days after the creation time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Deleting the file created in setup() returns 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After delete, getProperties must fail with 404 BLOB_NOT_FOUND.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any supplied access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file returns true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExistsWithResponse on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties must fail.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on the same (now missing) file returns 404 without throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when every supplied access condition is satisfiable.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any supplied access condition is unsatisfiable.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns path info carrying a fresh ETag and last-modified time.
@Test
public void setPermissionsMin() {
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
// WithResponse variant of setPermissions responds with HTTP 200.
@Test
public void setPermissionsWithResponse() {
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
        200);
}
// setPermissions succeeds when all supplied access conditions match the path's state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when an access condition is invalid.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}
// setPermissions against a path that was never created surfaces a storage error.
@Test
public void setPermissionsError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns path info with a fresh ETag and last-modified time.
@Test
public void setACLMin() {
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(r -> {
            assertNotNull(r.getETag());
            assertNotNull(r.getLastModified());
        })
        .verifyComplete();
}
// WithResponse variant of setAccessControlList responds with HTTP 200.
@Test
public void setACLWithResponse() {
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
        PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
        200);
}
// setAccessControlList fails when an access condition is invalid.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
        .verifyError(DataLakeStorageException.class);
}
// setAccessControlList against a never-created path surfaces a storage error.
@Test
public void setACLError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Guard used by @DisabledIf: recursive-ACL APIs require service version 2020-02-10 or newer.
private static boolean olderThan20200210ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: exactly one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// Recursive ACL update on a single file: one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// Recursive ACL removal on a single file: one file changed, no directories, no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
    // Entry list covers mask, default entries, and named user/group entries.
    List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
        "mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
            + "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
    StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
        .assertNext(r -> {
            assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, r.getCounters().getChangedFilesCount());
            assertEquals(0L, r.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// getAccessControl populates ACL list, permissions, owner, and group.
@Test
public void getAccessControlMin() {
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertNotNull(r.getAccessControlList());
            assertNotNull(r.getPermissions());
            assertNotNull(r.getOwner());
            assertNotNull(r.getGroup());
        })
        .verifyComplete();
}
// WithResponse variant (UPN resolution off) responds with HTTP 200.
@Test
public void getAccessControlWithResponse() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, null, null), 200);
}
// Requesting user-principal-name resolution (first arg true) still responds 200.
@Test
public void getAccessControlReturnUpn() {
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        true, null, null), 200);
}
// getAccessControl succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
        false, drc, null), 200);
}
// getAccessControl fails on invalid access conditions. The garbage-lease case is skipped:
// this operation does not reject an unknown lease id the way the other cases expect.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
    String noneMatch, String leaseID) {
    if (GARBAGE_LEASE_ID.equals(leaseID)) {
        return;
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
        .verifyError(DataLakeStorageException.class);
}
// Exhaustive check of the default property/header set for a freshly created file:
// basics are present, optional copy/lease/encryption fields are absent.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}
// Minimal getProperties responds with HTTP 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails on invalid access conditions.
// NOTE(review): unlike the other *ACFail tests in this file, the lease id used here is the
// value returned by setupPathLeaseCondition rather than the raw (invalid) leaseID — confirm
// this is intentional for this operation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}
// getProperties on a never-created path fails with a BlobNotFound storage error.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
// Passing null headers is accepted (200) and still returns the standard response headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Changing only the content type (other header fields copied from current properties)
// is reflected by a subsequent getProperties call.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Round-trips each HTTP header field through setHttpHeaders and verifies it via getProperties.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // Reactor publishers are lazy: without block() the append/flush were never subscribed
    // and no data was actually written (every other test in this file blocks on these calls).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Data for setHTTPHeadersHeaders: the all-null case and a fully populated header set.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language",
            Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
    );
}
// setHttpHeaders succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails when an access condition is invalid.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a never-created path surfaces a storage error.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
// Metadata written via setMetadata is returned verbatim by getProperties.
@Test
public void setMetadataMin() {
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// Both an empty metadata map and a two-entry map round-trip with the expected status code.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}
// setMetadata succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when an access condition is invalid.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}
// setMetadata on a never-created path surfaces a storage error.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Full read with all-default options: verifies the complete response header set for a plain
// file (no metadata, no copy/lease extras) and that the downloaded bytes match what was written.
// Header assertions run inside flatMap, so a failure surfaces as an error signal to StepVerifier.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false)
        .flatMap(r -> {
            HttpHeaders headers = r.getHeaders();
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
            return FluxUtil.collectBytesInByteBufferStream(r.getValue());
        }))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reading a zero-length file emits an empty buffer rather than erroring.
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}
// A retried ranged download must re-request "bytes=2-6"; the mock policy rejects any other
// range, so the pipeline surfaces an IOException after the injected failures.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}
// Minimal read: the full downloaded payload equals what was uploaded.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Ranged read: offset/count select the expected substring of the uploaded data.
// NOTE(review): the @ParameterizedTest/@MethodSource annotations were attached to the private
// static data supplier itself, which JUnit cannot run as a test; the test method that consumes
// readRangeSupplier was missing. Restored the test and left the supplier as a plain provider.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(Long offset, Long count, String expectedData) {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(offset, count), null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertEquals(expectedData, new String(bytes, StandardCharsets.UTF_8)))
        .verifyComplete();
}

// Data for readRange: full file, leading slice, and an interior slice.
private static Stream<Arguments> readRangeSupplier() {
    return Stream.of(
        Arguments.of(0L, null, DATA.getDefaultText()),
        Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
        Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
    );
}
// read succeeds (200) when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// read fails when an access condition is invalid.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
// Requesting an MD5 (last read arg true) for a ranged read returns the Content-MD5 header
// matching a locally computed digest of the same byte range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// With the default retry policy, a download that hits five injected transient failures
// still completes and yields the full payload.
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // (Removed an unused ByteArrayOutputStream local left over from a synchronous variant.)
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// readToFile without the overwrite flag must refuse to clobber an existing destination file.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // Reactor publishers are lazy: without block() the append/flush were never subscribed
    // and the source file was left empty (every other test in this file blocks on these calls).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true replaces an existing destination and writes the full payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFile creates the destination when it does not exist and writes the full payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// Custom OpenOptions (CREATE/READ/WRITE, no TRUNCATE) are honored by readToFileWithResponse.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// TRUNCATE_EXISTING in the OpenOptions overwrites an existing destination file.
@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File sizes for the download tests: tiny, block-aligned, unaligned (odd block + remainder),
// and large (50 MB).
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(
        20,
        16 * 1024 * 1024,
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged readToFile: the output file must match the requested slice of the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
        null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}
// Ranges: exact size, offset slice, interior slice, truncated, and count past end of file.
private static Stream<FileRange> downloadFileRangeSupplier() {
    return Stream.of(
        new FileRange(0, DATA.getDefaultDataSizeLong()),
        new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(3, 2L),
        new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(0, 10 * 1024L)
    );
}
// A range whose offset is past the end of the file fails with a storage error.
@Test
public void downloadFileRangeFail() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
        new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
        null, false, null))
        .verifyError(DataLakeStorageException.class);
}
// A range with offset 0 and no count downloads the whole file.
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
// readToFile succeeds when all supplied access conditions match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setLeaseId(setupPathLeaseCondition(fc, leaseID));
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, bro, false, null).block());
}
// readToFile fails on invalid access conditions with either a condition-not-met or a
// lease-mismatch error code, depending on which condition was violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setLeaseId(leaseID);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
        null, bro, false, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
                "LeaseIdMismatchWithBlobOperation"));
        });
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
/**
 * Deprecated-API progress receiver used by the download tests; records each reported
 * cumulative byte count so tests can assert on the full progress sequence.
 */
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
    final List<Long> progresses = new ArrayList<>();

    @Override
    public void reportProgress(long bytesTransferred) {
        this.progresses.add(bytesTransferred);
    }
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
/** Progress listener used by the download tests; records every reported cumulative count. */
private static final class MockProgressListener implements ProgressListener {
    final List<Long> progresses = new ArrayList<>();

    @Override
    public void handleProgress(long progress) {
        this.progresses.add(progress);
    }
}
@Test
public void renameMin() {
    // A minimal same-file-system rename should succeed with 201 (Created).
    String destinationPath = generatePathName();
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, destinationPath, null, null, null), 201);
}
@Test
public void renameWithResponse() {
    // The rename response exposes a client for the destination, which must be readable.
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null)
            .flatMap(renameResponse -> renameResponse.getValue().getPropertiesWithResponse(null)))
        .assertNext(propertiesResponse -> assertEquals(200, propertiesResponse.getStatusCode()))
        .verifyComplete();

    // The source path must no longer resolve after the rename.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> assertInstanceOf(DataLakeStorageException.class, error));
}
@Test
public void renameFilesystemWithResponse() {
    // Renaming across file systems: the returned client must target the new file system,
    // and the source path must no longer exist.
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient
        .createFileSystem(generateFileSystemName())
        .blockOptional()
        // Fail fast with a clear message instead of a NullPointerException below.
        .orElseThrow(() -> new RuntimeException("File system was not created"));

    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
            null, null, null)
            .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        // Expected value first, per JUnit convention (original had the arguments swapped).
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();

    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
@Test
public void renameError() {
    // Renaming a path that was never created must surface a storage error.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    // Percent-encoded characters in source and destination names must survive a rename.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();

    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
            .flatMap(renameResponse -> {
                assertEquals(201, renameResponse.getStatusCode());
                return renameResponse.getValue().getPropertiesWithResponse(null);
            }))
        .assertNext(propertiesResponse -> assertEquals(200, propertiesResponse.getStatusCode()))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Satisfied access conditions on the source should allow the rename (201 Created).
    // The lease must be set up before the ETag is captured, matching the original ordering.
    String activeLeaseId = setupPathLeaseCondition(fc, leaseID);
    String matchingEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions sourceConditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(matchingEtag)
        .setIfNoneMatch(noneMatch)
        .setLeaseId(activeLeaseId);

    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, generatePathName(), sourceConditions, null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Unsatisfied access conditions on the source must fail the rename.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    setupPathLeaseCondition(fc, leaseID);
    String failingNoneMatch = setupPathMatchCondition(fc, noneMatch);
    DataLakeRequestConditions sourceConditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(failingNoneMatch)
        .setLeaseId(leaseID);

    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), sourceConditions, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Satisfied access conditions on an existing destination should allow the rename.
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    String activeLeaseId = setupPathLeaseCondition(destFile, leaseID);
    String matchingEtag = setupPathMatchCondition(destFile, match);
    DataLakeRequestConditions destConditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(matchingEtag)
        .setIfNoneMatch(noneMatch)
        .setLeaseId(activeLeaseId);

    assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null, destConditions, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Unsatisfied access conditions on the destination must fail the rename.
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    setupPathLeaseCondition(destFile, leaseID);
    String failingNoneMatch = setupPathMatchCondition(destFile, noneMatch);
    DataLakeRequestConditions destConditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(failingNoneMatch)
        .setLeaseId(leaseID);

    StepVerifier.create(fc.renameWithResponse(null, pathName, null, destConditions, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
    // A SAS carrying move permission must allow rename; the destination must then be readable.
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(),
        fc.getFilePath());

    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        // Fail fast with a clear message instead of an NPE on the StepVerifier below.
        .orElseThrow(() -> new RuntimeException("Rename did not return a destination client"));

    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // Expected value first, per JUnit convention (original had the arguments swapped).
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    // Same as renameSasToken, but the SAS is supplied with a leading '?', which the
    // client must tolerate when appending it to requests.
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(),
        fc.getFilePath());

    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        // Fail fast with a clear message instead of an NPE on the StepVerifier below.
        .orElseThrow(() -> new RuntimeException("Rename did not return a destination client"));

    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // Expected value first, per JUnit convention (original had the arguments swapped).
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void appendDataMin() {
    // A minimal append at offset 0 should complete without throwing.
    BinaryData body = DATA.getDefaultBinaryData();
    assertDoesNotThrow(() -> fc.append(body, 0).block());
}
@Test
public void appendData() {
    // A successful append returns 202 with request-id/version/date headers and is server-encrypted.
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
    // An append carrying the correct Content-MD5 should be accepted with 202.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());

    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, contentMd5, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
    // Invalid body/length combinations must fail client-side with the expected exception type.
    StepVerifier.create(fc.append(is, 0, dataSize)).verifyError(exceptionType);
}
// Rows: body flux, declared length, expected client-side exception.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(
        Arguments.of(null, actualSize, NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), actualSize + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), actualSize - 1, UnexpectedLengthException.class));
}
@Test
public void appendDataEmptyBody() {
    // A zero-length append must be rejected by the service.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    BinaryData emptyBody = BinaryData.fromBytes(new byte[0]);
    StepVerifier.create(fc.append(emptyBody, 0)).verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
    // A null body must fail with NullPointerException.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(null, 0, 0)).verifyError(NullPointerException.class);
}
@Test
public void appendDataLease() {
    // Supplying the active lease id should allow the append (202).
    String activeLeaseId = setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    assertAsyncResponseStatusCode(
        fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, activeLeaseId), 202);
}
@Test
public void appendDataLeaseFail() {
    // Appending with the wrong lease id against a leased path must fail with 412.
    setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException
                = assertInstanceOf(DataLakeStorageException.class, error);
            assertEquals(412, storageException.getResponse().getStatusCode());
        });
}
// True when the service version under test predates 2020-08-04; used by @DisabledIf on
// the lease-action append tests below to skip them against older service versions.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
    // LeaseAction.ACQUIRE on append should leave the file under a fixed-duration lease.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15);

    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, options), 202);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.LOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, properties.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, properties.getLeaseDuration());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
    // LeaseAction.AUTO_RENEW with the active lease id should keep the lease in place.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    createLeaseAsyncClient(fc, leaseId).acquireLease(15).block();

    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.AUTO_RENEW)
        .setLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, options), 202);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.LOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, properties.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, properties.getLeaseDuration());
        })
        .verifyComplete();
}
@Test
public void appendDataLeaseRelease() {
    // LeaseAction.RELEASE with flush must leave the file unlocked afterwards.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    createLeaseAsyncClient(fc, leaseId).acquireLease(15).block();

    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, options), 202);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
    // ACQUIRE_RELEASE takes a lease for the operation and drops it once flushed.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15)
        .setFlush(true);

    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, options), 202);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
        })
        .verifyComplete();
}
@Test
public void appendDataError() {
    // Appending to a path that was never created must fail with 404.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException
                = assertInstanceOf(DataLakeStorageException.class, error);
            assertEquals(404, storageException.getResponse().getStatusCode());
        });
}
@Test
public void appendDataRetryOnTransientFailure() {
    // A transient failure injected into the pipeline must be retried transparently,
    // so the flushed content still round-trips intact.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());

    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    // With setFlush(true) the append both stages and commits the data in one call,
    // so the content is immediately readable.
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
            new DataLakeFileAppendOptions().setFlush(true)))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();

    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@Test
public void appendBinaryDataMin() {
    // A minimal BinaryData append at offset 0 should complete without throwing.
    BinaryData body = DATA.getDefaultBinaryData();
    assertDoesNotThrow(() -> fc.append(body, 0).block());
}
@Test
public void appendBinaryData() {
    // Appending BinaryData returns 202 with the standard response headers set.
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    // Appending BinaryData with setFlush(true) should be accepted with 202.
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
            new DataLakeFileAppendOptions().setFlush(true)))
        .assertNext(response -> {
            assertEquals(202, response.getStatusCode());
            HttpHeaders responseHeaders = response.getHeaders();
            assertNotNull(responseHeaders.getValue(X_MS_REQUEST_ID));
            assertNotNull(responseHeaders.getValue(X_MS_VERSION));
            assertNotNull(responseHeaders.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(responseHeaders.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@Test
public void flushDataMin() {
    // Flushing exactly the appended length should succeed.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    long appendedLength = DATA.getDefaultDataSizeLong();
    assertDoesNotThrow(() -> fc.flush(appendedLength, true).block());
}
@Test
public void flushClose() {
    // Flushing with close=true should succeed on freshly appended data.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(
        () -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, true, null, null).block());
}
@Test
public void flushRetainUncommittedData() {
    // Flushing with retainUncommittedData=true should succeed on freshly appended data.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(
        () -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true, false, null, null).block());
}
@Test
public void flushIA() {
    // Flushing a position that does not match the staged data length must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    // HTTP headers supplied at flush time must be reflected on the path's properties.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);

    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();

    // An unset content type defaults to application/octet-stream on the service side.
    String expectedContentType = (contentType == null) ? "application/octet-stream" : contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition,
            contentEncoding, contentLanguage, null, expectedContentType))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Satisfied request conditions should allow the flush (200 OK).
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // The lease must be set up before the ETag is captured, matching the original ordering.
    String activeLeaseId = setupPathLeaseCondition(fc, leaseID);
    String matchingEtag = setupPathMatchCondition(fc, match);
    DataLakeRequestConditions conditions = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(matchingEtag)
        .setIfNoneMatch(noneMatch)
        .setLeaseId(activeLeaseId);

    assertAsyncResponseStatusCode(
        fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, null, conditions), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Unsatisfied request conditions must fail the flush.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);

    // Use the long-valued size accessor for consistency with every other flush test here
    // (the original called the int-valued getDefaultDataSize(); the value is identical).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void flushError() {
    // Flushing a path that was never created must surface a storage error.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true)).verifyError(DataLakeStorageException.class);
}
@Test
public void flushDataOverwrite() {
    // A second flush without overwrite over already-committed data must fail.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());

    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    // The client must decode percent-encoded names and report the logical file path.
    DataLakeFileAsyncClient builtClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, builtClient.getFilePath());
}
@Test
public void builderBearerTokenValidation() {
    // Bearer-token credentials require HTTPS; an http endpoint must be rejected at build time.
    String httpEndpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    DataLakePathClientBuilder builder = new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(httpEndpoint);
    assertThrows(IllegalArgumentException.class, builder::buildFileAsyncClient);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Rows: file size to upload, explicit block size (null -> client default).
private static Stream<Arguments> uploadFromFileSupplier() {
    return Stream.of(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024));
}
@Test
public void uploadFromFileWithMetadata() throws IOException {
    // Metadata passed to uploadFromFile must land on the path, and the content must round-trip.
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);

    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(metadata, properties.getMetadata()))
        .verifyComplete();

    byte[] expectedContent = Files.readAllBytes(file.toPath());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(expectedContent, downloaded))
        .verifyComplete();
}
@Test
public void uploadFromFileDefaultNoOverwrite() {
    // Without the overwrite flag, uploading over an existing path must fail for both clients.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);

    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);

    // Register this file for cleanup too; the original leaked it.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void uploadFromFileOverwrite() {
    // With overwrite=true, uploading over an existing path must succeed for both clients.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);

    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());

    // Register this file for cleanup too; the original leaked it.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the total number of bytes sent while uploading a file. This differs from the other reporters,
 * which count the number of progress callbacks: uploadFromFile hooks into the disk-read data stream,
 * whose read size is hard-coded, so the cumulative byte count is the meaningful measurement here.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Latest cumulative byte count observed; progress callbacks carry a running total.
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
/** Non-deprecated counterpart of FileUploadReporter: records the latest cumulative byte count. */
private static final class FileUploadListener implements ProgressListener {
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Rows: total file size, block size, concurrency (buffer count).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    // Uploads honoring custom single-shot/block sizes must still produce the full file.
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxSingleUploadSizeLong(singleUploadSize);

    fc.uploadFromFile(file.toPath().toString(), transferOptions, null, null, null).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
}
// Rows: data size, single-shot upload threshold, block size (null -> client default).
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Stream.of(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L));
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    // The response-returning overload must yield 200 plus ETag/Last-Modified, and the
    // uploaded file must report the full size.
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxSingleUploadSizeLong(singleUploadSize);

    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(), transferOptions,
            null, null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertNotNull(response.getValue().getETag());
            assertNotNull(response.getValue().getLastModified());
        })
        .verifyComplete();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Rows: three buffers to upload (some zero-length) and the bytes a download should yield.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer zeroLength = ByteBuffer.allocate(0);
    byte[] hello = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] space = " ".getBytes(StandardCharsets.UTF_8);
    byte[] world = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), zeroLength,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), zeroLength, ByteBuffer.wrap(world),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(zeroLength, ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
// Exercises the buffered (chunked) upload path with various data sizes, block
// sizes, and concurrency levels, then verifies round-trip content.
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Read-back verification is skipped for payloads of 100 MB and larger to
// keep the test's runtime and memory use bounded.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// Cases for asyncBufferedUpload: (dataSize, bufferSize, numBuffs).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    Arguments[] cases = {
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    };
    return Arrays.stream(cases);
}
// Asserts that 'actual' is exactly the concatenation of 'expectedPieces'.
// The aggregate buffer is walked piece by piece by windowing it with limit()
// so each slice is compared against the corresponding expected buffer.
private static void compareListToBuffer(List<ByteBuffer> expectedPieces, ByteBuffer actual) {
    actual.position(0);
    for (ByteBuffer expected : expectedPieces) {
        expected.position(0);
        actual.limit(actual.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, actual);
        actual.position(actual.position() + expected.remaining());
    }
    // Every byte of the aggregate must have been accounted for.
    assertEquals(0, actual.remaining());
}
@SuppressWarnings("deprecation")
// Test double for the deprecated ProgressReceiver API: counts callbacks and
// asserts every reported cumulative total is a multiple of the block size.
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
// Number of progress callbacks observed; read directly by the tests.
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
// Plain 'assert' — only enforced when the JVM runs with -ea.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Test double for the ProgressListener API (replacement for ProgressReceiver):
// counts callbacks and asserts each total is a multiple of the block size.
private static final class Listener implements ProgressListener {
private final long blockSize;
// Number of progress callbacks observed; read directly by the tests.
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
// Plain 'assert' — only enforced when the JVM runs with -ea.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// Verifies the deprecated ProgressReceiver is invoked at least once per
// uploaded block during a buffered upload.
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB)
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At least one progress callback per full block is expected.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Shared cases for the progress-reporting tests: (size, blockSize, bufferCount).
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    Arguments[] cases = {
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    };
    return Arrays.stream(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// Same as bufferedUploadWithReporter but via the newer ProgressListener API.
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At least one progress callback per full block is expected.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
// Uploads a source made of multiple chunks whose sizes differ from the
// configured block size, then verifies the downloaded bytes match the
// concatenation of all chunks.
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
// Chunk sizes are given in MB.
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Cases for bufferedUploadChunkedSource: (chunk sizes in MB, bufferSize in MB,
// numBuffers). Rows cover chunks below, equal to, and above the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    Arguments[] cases = {
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    };
    return Arrays.stream(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Checks that the upload code picks the correct code path (single-shot vs
// chunked) depending on total size relative to the 4 MB single-upload cap,
// and that the round-tripped content is intact either way.
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Same pathing check as bufferedUploadHandlePathing, but with a hot
// (publish().autoConnect()) source that cannot be re-subscribed/replayed.
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists straddling the 4 MB single-upload threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB));
    return cases.stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
// Uploads a hot flux through a pipeline that injects transient failures and
// verifies the retry logic still produces intact content. Writes go to the
// shared file 'fc' (clientWithFailure targets fc's URL); reads use a clean
// client against the same URL.
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists for the transient-failure pathing test.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    List<List<Integer>> cases = Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
    return cases.stream();
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
// InputStream-based upload through a transient-failure pipeline: covers both
// the single-shot path (small size) and the chunked path (> 2 MB block size).
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with a NullPointerException delivered
// through the reactive pipeline, before any network call is attempted.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fileClient = dataLakeFileSystemAsyncClient.createFile(generatePathName())
        .blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    ParallelTransferOptions transferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(4L)
        .setMaxConcurrency(4);
    StepVerifier.create(fileClient.upload((Flux<ByteBuffer>) null, transferOptions, true))
        .verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
// Uploads with the given HTTP headers set and verifies they come back on a
// subsequent getProperties call. A null content type is expected to default
// to "application/octet-stream" on the service side.
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
// MD5 is only computed and sent when the case asks for validation.
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Cases for bufferedUploadHeaders: (dataSize, cacheControl, contentDisposition,
// contentEncoding, contentLanguage, validateContentMD5, contentType).
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    Arguments[] cases = {
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    };
    return Arrays.stream(cases);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Uploads with zero or two metadata pairs and verifies getProperties returns
// exactly the metadata that was set.
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
// Verifies that the single-upload-size / block-size options control how many
// append calls the client issues, using an anonymous subclass as a spy to
// count appendWithResponse invocations.
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
// Count the call, then delegate to the real implementation.
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Uploads with explicit POSIX permissions and umask set and verifies the
// upload succeeds and the file has the expected size.
@Test
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fileClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileParallelUploadOptions options = new FileParallelUploadOptions(Flux.just(getRandomData(10)))
        .setPermissions("0777")
        .setUmask("0057");
    Mono<Response<PathProperties>> uploadThenProps = fileClient.uploadWithResponse(options)
        .then(fileClient.getPropertiesWithResponse(null));
    StepVerifier.create(uploadThenProps)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Buffered upload with access conditions that should all be satisfied; the
// request is expected to succeed with 200.
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Buffered upload with access conditions that are deliberately NOT satisfied;
// the service must reject the request with 412 Precondition Failed.
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
// Forces the buffered upload to fail (garbage lease id) while multiple
// buffers are in flight; the failure must surface as DataLakeStorageException
// rather than hanging on buffer-pool locks.
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// A second upload to the same path without the overwrite flag must fail with
// IllegalArgumentException (overwrite defaults to false).
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Verifies that uploadFromFile with overwrite=true succeeds against a path
// that already holds content.
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // BUGFIX: uploadFromFile returns a lazy Mono; without block() the first
    // upload was never actually executed, so nothing was being overwritten.
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // BUGFIX: track the second temp file as well so it is cleaned up after
    // the test run instead of being leaked on disk.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
@Test
// Uploads from a non-markable (non-replayable) file-backed Flux and verifies
// the round trip by downloading to another file and comparing contents.
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
// NOTE(review): the AsynchronousFileChannel is never explicitly closed;
// confirm whether FluxUtil.readFile takes ownership, otherwise this leaks
// a file handle per test run.
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Uploading from an InputStream without supplying a length must still work;
// the content is then read back and compared byte-for-byte.
@Test
public void uploadInputStreamNoLength() {
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream());
    assertDoesNotThrow(() -> fc.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
// An InputStream upload with a declared length that does not match the actual
// stream length (zero, negative, too short, too long) must fail.
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Bad declared lengths: zero, negative, one short of, and one past the
// actual default data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actual = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actual - 1, actual + 1);
}
// An upload through a pipeline that injects transient failures must succeed
// via retries, and the stored content must match the input exactly.
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Uploads BinaryData via uploadWithResponse and verifies the stored bytes.
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultBinaryData());
    assertDoesNotThrow(() -> client.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Uploads BinaryData with overwrite=true over the existing shared file and
// verifies the stored bytes.
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Uploads with an encryption context set and verifies getProperties returns
// the same context string.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext)).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(encryptionContext, properties.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
// Uploads a small CSV to the shared file client 'fc': two fixed rows repeated
// numCopies times, optionally preceded by a header row, using the serializer's
// configured column and record separators.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = String.join(columnSeparator, "rn1", "rn2", "rn3", "rn4") + s.getRecordSeparator();
    String csv = String.join(columnSeparator, "100", "200", "300", "400") + s.getRecordSeparator()
        + String.join(columnSeparator, "300", "400", "500", "600") + s.getRecordSeparator();
    byte[] headerBytes = header.getBytes();
    byte[] csvBytes = csv.getBytes();
    int headerLength = s.isHeadersPresent() ? headerBytes.length : 0;
    byte[] data = new byte[headerLength + csvBytes.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headerBytes, 0, data, 0, headerBytes.length);
    }
    for (int copy = 0; copy < numCopies; copy++) {
        System.arraycopy(csvBytes, 0, data, headerLength + copy * csvBytes.length, csvBytes.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Writes a tiny JSON object with numCopies "nameN": "ownerN" entries to 'fc'.
// Every entry keeps its trailing comma, matching the data shape the query
// tests were written against.
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int i = 0; i < numCopies; i++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    fc.flush(json.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
// Runs "SELECT *" over the whole CSV and verifies the query result is
// byte-identical to a plain read of the file.
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // BUGFIX: ByteBuffer.array() returns the ENTIRE backing array and
        // ignores position/limit (and throws for buffers without an
        // accessible array). Copy only the remaining bytes of each piece.
        ByteArrayOutputStream queryData = fc.query(expression)
            .reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
                byte[] chunk = new byte[piece.remaining()];
                piece.duplicate().get(chunk);
                outputStream.write(chunk, 0, chunk.length);
                return outputStream;
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
// Queries CSV data with assorted record/column separators and header
// configurations, verifying the output matches a raw read (minus the header
// when the input had one but the output omits it).
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn)
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
// The uploaded header row is 16 bytes; the query output drops it.
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Cases for queryCsvSerializationSeparator:
// (recordSeparator, columnSeparator, headersPresentIn, headersPresentOut).
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    Arguments[] cases = {
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    };
    return Arrays.stream(cases);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV query with a non-null escape character and field quote configured on
// both input and output; result must equal a raw read.
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
// JSON-in / JSON-out query over a small object; result must equal the raw
// file content plus a trailing newline.
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
// Append a single '\n' (byte 10): the query output terminates the final
// record with the record separator, the raw file does not.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Cases for queryInputJson: (numCopies, recordSeparator).
private static Stream<Arguments> queryInputJsonSupplier() {
    Arguments[] cases = {
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    };
    return Arrays.stream(cases);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, JSON out: a single CSV row must be returned as a JSON object with
// positional field names (_1.._4).
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// Only the prefix is compared; trailing output is not asserted on.
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// JSON in, CSV out: a two-entry JSON object must flatten to one CSV row.
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// CSV in, Arrow out: only checks that the query with an Arrow output schema
// completes without throwing; the Arrow bytes themselves are not validated.
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Uploads CSV with '.' separators but queries it as ','-separated: the
// mismatched column ordinal produces NON-fatal errors that must reach the
// registered error consumer without failing the stream.
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryFatalError() {
        // Uploads delimited data but declares JSON input serialization; the mismatch is a fatal
        // query error that only surfaces once the result stream is consumed.
        FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(true);
        uploadCsv(base.setColumnSeparator('.'), 32);
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
                new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
                .assertNext(r -> {
                    // The response itself succeeds; the failure occurs while draining the body.
                    assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
                })
                .verifyComplete();
        });
    }
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void queryProgressReceiver() {
        // Verifies the progress consumer eventually reports the full file size as scanned.
        FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(base.setColumnSeparator('.'), 32);
        long sizeofBlobToRead = fc.getProperties().block().getFileSize();
        String expression = "SELECT * from BlobStorage";
        liveTestScenarioWithRetry(() -> {
            MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
            FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
            // blockLast() drains the query result stream so the progress callbacks fire.
            fc.queryWithResponse(options).block().getValue().blockLast();
            assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
        });
    }
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
int retry = 0;
while (retry < 5) {
try {
runnable.run();
break;
} catch (Exception ex) {
retry++;
sleepIfRunningAgainstService(5000);
}
}
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
    /** Test helper that records the bytes-scanned value of every query progress callback. */
    static class MockProgressReceiver implements Consumer<FileQueryProgress> {
        // Progress values in callback order; inspected directly by the progress-receiver tests.
        List<Long> progressList = new ArrayList<>();
        @Override
        public void accept(FileQueryProgress progress) {
            progressList.add(progress.getBytesScanned());
        }
    }
    /**
     * Test helper that counts non-fatal query errors, asserting every reported error is
     * non-fatal and carries the expected error name.
     */
    static class MockErrorReceiver implements Consumer<FileQueryError> {
        // Error name each callback is expected to report.
        String expectedType;
        // Number of callbacks received; inspected directly by the error-receiver tests.
        int numErrors;
        MockErrorReceiver(String expectedType) {
            this.expectedType = expectedType;
            this.numErrors = 0;
        }
        @Override
        public void accept(FileQueryError error) {
            assertFalse(error.isFatal());
            assertEquals(expectedType, error.getName());
            numErrors++;
        }
    }
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} |
This can be simplified to `TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r)` | public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
} | assertEquals(DATA.getDefaultText(), downloadData.toString()); | public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    // Creates a fresh remote file before each test; fc is the primary client under test.
    @BeforeEach
    public void setup() {
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    }
    // Deletes any local temp files a test created; File.delete return values intentionally ignored.
    @SuppressWarnings("ResultOfMethodCallIgnored")
    @AfterEach
    public void cleanup() {
        createdFiles.forEach(File::delete);
    }
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Verifies that owner and group set at creation time round-trip through getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// When owner/group are left null the service assigns the default "$superuser" principal.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// Verifies createIfNotExistsWithResponse accepts HTTP headers (all-null and all-set
// variants) and returns 201 Created. Header round-trip is not asserted here.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName())<br></br>;
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Verifies metadata supplied at creation (empty and two-entry variants) is stored
// and every supplied key/value is visible on getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
// Containment check (not equality): the service may add its own metadata entries.
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Verifies permissions 0777 masked with umask 0057 produce effective rwx-w---- permissions.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Verifies creation succeeds (201) when a proposed lease id is paired with a lease duration.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// A proposed lease id without a lease duration is rejected by the service.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// Verifies a 15-second fixed lease taken at creation is reflected in the file's
// lease status/state/duration properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Verifies creation succeeds (201) for each schedule-deletion variant provided by
// timeExpiresOnOptionsSupplier (absolute expiry time and null).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Verifies a relative time-to-expire of 6 days yields expiresOn == creationTime + 6 days
// (compared with reduced precision to tolerate clock granularity).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Deleting an existing file with all-default parameters returns HTTP 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After deletion, getProperties fails with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when every supplied access condition (lease, ETag
// match/none-match, modified-since bounds) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition is violated (ifNoneMatch is seeded with
// the file's real ETag, so conditional failure is guaranteed for that case).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
    // An existing file should report true when removed via deleteIfExists.
    StepVerifier.create(fc.deleteIfExists())
        .assertNext(deleted -> assertTrue(deleted))
        .verifyComplete();
}
// deleteIfExists on an existing file returns HTTP 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists succeeds, getProperties on the same path fails.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// First deleteIfExists returns 200; a second on the now-missing file returns 404
// without throwing (the "if exists" contract).
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // A successful setPermissions returns path info carrying a fresh ETag
    // and last-modified timestamp.
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(pathInfo -> {
            assertNotNull(pathInfo.getLastModified());
            assertNotNull(pathInfo.getETag());
        })
        .verifyComplete();
}
// setPermissionsWithResponse returns HTTP 200 on an existing file.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
    // Setting permissions on a file that was never created must fail.
    String missingPath = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(missingPath);
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // A successful ACL update returns path info with a fresh ETag and timestamp.
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(pathInfo -> {
            assertNotNull(pathInfo.getLastModified());
            assertNotNull(pathInfo.getETag());
        })
        .verifyComplete();
}
// setAccessControlListWithResponse returns HTTP 200 on an existing file.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
    // Applying an ACL to a file that was never created must fail.
    String missingPath = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(missingPath);
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// @DisabledIf predicate: true when the targeted service version predates 2020-02-10
// (recursive ACL APIs require 2020-02-10 or later).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file changes exactly one file, zero directories,
// with no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file changes exactly one file, zero directories,
// with no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal of named-user/group and default entries on a single file
// reports one changed file and no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
    // Every component of the access-control result should be populated.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(accessControl -> {
            assertNotNull(accessControl.getOwner());
            assertNotNull(accessControl.getGroup());
            assertNotNull(accessControl.getPermissions());
            assertNotNull(accessControl.getAccessControlList());
        })
        .verifyComplete();
}
// getAccessControlWithResponse (no UPN resolution) returns HTTP 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControlWithResponse with userPrincipalNameReturned=true returns HTTP 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails when an access condition is violated. The garbage-lease
// case is skipped: this operation does not enforce lease conditions that way.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustively checks the default property/header surface of a freshly created,
// never-modified file: populated core fields, unlocked/available lease, no copy
// state, HOT tier, empty metadata, and not a directory.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
// No copy has ever been started, so all copy-related fields are absent.
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// getPropertiesWithResponse on an existing file returns HTTP 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails with a storage exception when an access condition is violated.
// NOTE(review): unlike the other *ACFail tests, this one passes the invalid lease id
// through setupPathLeaseCondition rather than directly — confirm this is intentional.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    // Requesting properties of a file that was never created fails with BlobNotFound.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException
                = assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageException.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Passing null headers is a valid no-op style call and still returns 200
    // with the standard response headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(response -> {
            validateBasicHeaders(response.getHeaders());
            assertEquals(200, response.getStatusCode());
        })
        .verifyComplete();
}
// Reads the current headers, changes only the content type, and verifies the
// new type is visible on getProperties.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUG FIX: append/flush return cold Monos; the original never subscribed to
    // them (no block()), so no data was uploaded before the headers were set.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Apply the parameterized header set (all-null and all-populated variants).
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    // Every supplied header value must round-trip through getProperties.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Supplies (cacheControl, contentDisposition, contentEncoding, contentLanguage,
// contentMD5, contentType): one all-null row and one fully populated row whose
// MD5 is the Base64 digest of the default test payload.
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
// setHttpHeaders succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
    // Setting headers on a file that was never created must fail.
    String missingPath = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(missingPath);
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single metadata entry should round-trip through setMetadata/getProperties.
    Map<String, String> expectedMetadata = new HashMap<>();
    expectedMetadata.put("foo", "bar");
    fc.setMetadata(expectedMetadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(expectedMetadata, properties.getMetadata()))
        .verifyComplete();
}
// Verifies setMetadata with empty and two-entry maps returns the expected status
// and that getProperties reflects exactly the supplied map.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
    // Metadata operations on a file that was never created must fail.
    String missingPath = generatePathName();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(missingPath);
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Uploads the default payload and validates the full header surface of a default
// read: body matches, no metadata/copy/lease headers, ranges accepted, encrypted.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
// NOTE(review): this inner subscribe() is fire-and-forget — the body
// assertion may run after the verifier completes; confirm intent.
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
// No copy has been started, so all copy headers are absent.
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
@Test
public void readEmptyFile() {
    // Reading a zero-length file emits a single empty buffer.
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(buffer -> {
            assertEquals(0, buffer.array().length);
        })
        .verifyComplete();
}
// Uses MockRetryRangeResponsePolicy to assert that download retries request the
// remaining range ("bytes=2-6"); the mocked mid-stream failure surfaces as IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
// The response itself succeeds; consuming the body triggers the mocked failure.
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
}
@Test
public void readMin() {
    // The downloaded bytes must be exactly what was uploaded.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Verifies ranged reads: a null count means "to end of file"; the accumulated
// downloaded bytes must equal the expected substring.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
// NOTE(review): the inner subscribe() is fire-and-forget, so this
// assertion may run after the verifier completes; confirm intent.
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Supplies (offset, count, expected data); a null count reads to end of file.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// read succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails with a storage exception when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// A ranged read with rangeGetContentMd5=true returns a Content-MD5 header equal to
// the Base64 MD5 of the requested 3-byte range.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile without overwrite must fail when the destination already exists.
// BUG FIX: removed a duplicated @Test annotation (JUnit's @Test is not
// @Repeatable, so the duplicate did not compile) and subscribed to append/flush
// with block() so the upload actually completes before the download attempt.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true succeeds even when the destination file already
// exists, and the downloaded content matches the uploaded payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFileWithResponse with explicit open options (CREATE + READ + WRITE) creates the
// destination file when it does not exist.
// Fix: the setup previously CREATED the file (copied from downloadFileExistOpenOptions),
// contradicting the test name and leaving StandardOpenOption.CREATE unexercised; the file
// is now deleted up front so the open options must create it.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
            null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// readToFileWithResponse with CREATE + TRUNCATE_EXISTING + READ + WRITE overwrites an
// existing destination file via explicit open options.
@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
            null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
// File sizes for the download tests: tiny, block-aligned 16 MB, a deliberately odd size
// (8 * 1026 * 1024 + 10 — presumably chosen to exercise non-aligned chunking; confirm),
// and a large 50 MB payload.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(
        20,
        16 * 1024 * 1024,
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    );
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads various sub-ranges of a blob to a local file and compares against the source.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
            null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}
// Ranges: full blob, all-but-first-byte, interior slice, all-but-last-byte, and a count
// extending past the end of the data (the service returns only what exists).
private static Stream<FileRange> downloadFileRangeSupplier() {
    return Stream.of(
        new FileRange(0, DATA.getDefaultDataSizeLong()),
        new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(3, 2L),
        new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
        new FileRange(0, 10 * 1024L)
    );
}
// A range whose offset is past the end of the blob fails with DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
            new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
            null, false, null))
        .verifyError(DataLakeStorageException.class);
}
// A FileRange with a null count downloads from the offset to the end of the blob.
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
            null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
// Download succeeds when every access condition (lease, ETag, modified-since) is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setLeaseId(setupPathLeaseCondition(fc, leaseID));
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, bro, false, null).block());
}
// Download fails when any access condition is violated; the service reports either a
// condition or a lease mismatch error code depending on which condition failed.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setLeaseId(leaseID);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
            null, bro, false, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
                "LeaseIdMismatchWithBlobOperation"));
        });
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback so tests can assert on the full sequence.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
    List<Long> progresses = new ArrayList<>();
    @Override
    public void reportProgress(long bytesTransferred) {
        progresses.add(bytesTransferred);
    }
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every progress callback so tests can assert on the full sequence.
private static final class MockProgressListener implements ProgressListener {
    List<Long> progresses = new ArrayList<>();
    @Override
    public void handleProgress(long progress) {
        progresses.add(progress);
    }
}
// Minimal rename: same file system, new path; service returns 201 Created.
@Test
public void renameMin() {
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
        null, null, null), 201);
}
// After rename, the returned client resolves the new path and the old path is gone.
@Test
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
            null, null, null))
        .assertNext(r -> {
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(p.getStatusCode(), 200))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renames a file into a different (newly created) file system; the new client resolves
// the destination and the original path no longer exists.
// Fix: assert the blocking createFileSystem call actually produced a client before
// dereferencing it, instead of risking an uninformative NullPointerException.
@Test
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    assertNotNull(newFileSystem);
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
            null, null, null))
        .assertNext(r -> {
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(p.getStatusCode(), 200))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a path that was never created fails with DataLakeStorageException.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
            null, null))
        .verifyError(DataLakeStorageException.class);
}
// Rename with URL-encoded characters in the source and/or destination path.
// Fix: the inner getPropertiesWithResponse verification previously used a flatMap that was
// never subscribed — its assertion never executed (and it illegally returned null from
// flatMap). Replaced with a nested StepVerifier so the check actually runs.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
            null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
// Rename succeeds when the SOURCE access conditions (lease/ETag/modified-since) match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
        null, null), 201);
}
// Rename fails when a SOURCE access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
            null, null))
        .verifyError(DataLakeStorageException.class);
}
// Rename succeeds when the DESTINATION access conditions match (destination pre-created).
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(destFile, leaseID))
        .setIfMatch(setupPathMatchCondition(destFile, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
        drc, null), 201);
}
// Rename fails when a DESTINATION access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    setupPathLeaseCondition(destFile, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
        .verifyError(DataLakeStorageException.class);
}
// Rename authorized via a file-system SAS token (read/move/write/create/add/delete).
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(r.getStatusCode(), 200))
        .verifyComplete();
}
// Same as renameSasToken, but the SAS is passed with a leading '?' which must be tolerated.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(r.getStatusCode(), 200))
        .verifyComplete();
}
// Minimal append at offset 0 completes without error.
@Test
public void appendDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse returns 202 plus the standard request/version/date/encryption headers.
@Test
public void appendData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Append with a client-computed MD5 of the payload is accepted by the service.
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Null body or a declared length that mismatches the actual data fails client-side.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
    StepVerifier.create(fc.append(is, 0, dataSize))
        .verifyError(exceptionType);
}
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    return Stream.of(
        Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
    );
}
// The service rejects a zero-length append.
@Test
public void appendDataEmptyBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
        .verifyError(DataLakeStorageException.class);
}
// A null Flux body fails with NullPointerException before any request is made.
@Test
public void appendDataNullBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(null, 0, 0))
        .verifyError(NullPointerException.class);
}
// Append succeeds when the correct (acquired) lease id is supplied.
@Test
public void appendDataLease() {
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
        null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with a wrong lease id against a leased path fails with 412.
@Test
public void appendDataLeaseFail() {
    setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(412, e.getResponse().getStatusCode());
        });
}
// Guard used by @DisabledIf: lease-action support requires service version 2020-08-04+.
private static boolean olderThan20200804ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE takes a fixed-duration lease as part of the append itself.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// LeaseAction.AUTO_RENEW renews an existing lease during the append; lease stays held.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.AUTO_RENEW)
        .setLeaseId(leaseId);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, r.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
        })
        .verifyComplete();
}
// LeaseAction.RELEASE (with flush=true) releases an existing lease as part of the append.
// Fix: added the @DisabledIf service-version guard that all sibling lease-action tests
// carry — LeaseAction is a 2020-08-04+ feature, so this test cannot run on older versions.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE takes a lease for the append and releases it on completion.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
        .setProposedLeaseId(CoreUtils.randomUuid().toString())
        .setLeaseDuration(15)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// Appending to a path that was never created fails with 404.
@Test
public void appendDataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertEquals(404, e.getResponse().getStatusCode());
        });
}
// Append succeeds despite injected transient HTTP failures (pipeline retries them).
@Test
public void appendDataRetryOnTransientFailure() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Append with flush=true commits the data in one call — a separate flush is not needed.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Minimal BinaryData append completes without error.
@Test
public void appendBinaryDataMin() {
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData appendWithResponse returns 202 plus standard headers.
@Test
public void appendBinaryData() {
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// BinaryData append with flush=true returns 202 plus standard headers.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Minimal flush after a single append completes without error.
@Test
public void flushDataMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (second boolean) succeeds.
@Test
public void flushClose() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
// Flush with retainUncommittedData=true (first boolean) succeeds.
@Test
public void flushRetainUncommittedData() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
// Flushing at a position that does not match the appended data fails.
@Test
public void flushIA() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
            null))
        .verifyError(DataLakeStorageException.class);
}
// HTTP headers set at flush time are reflected in subsequent getProperties calls;
// a null content type defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
// Flush succeeds when all access conditions (lease/ETag/modified-since) are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
// Flush fails when an access condition (lease/ETag/modified-since) is violated.
// Fix: use getDefaultDataSizeLong() for the flush position — every sibling test uses the
// long accessor; the int variant was an inconsistency (widening hid it at the call site).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
            null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// flush(..., overwrite=true) succeeds over existing data, while a second flush of the
// same range with overwrite=false is rejected by the service.
@Test
public void flushDataOverwrite() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // Same offset/length again, but without overwrite: must fail.
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
// A client built from a raw (possibly percent-encoded) file name must expose the
// decoded path via getFilePath().
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    DataLakeFileAsyncClient pathClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    String resolvedPath = pathClient.getFilePath();
    assertEquals(finalFileName, resolvedPath);
}
// Token (bearer) credentials require HTTPS; building a client against an http endpoint
// must be rejected with IllegalArgumentException.
@Test
public void builderBearerTokenValidation() {
    String httpEndpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    DataLakePathClientBuilder builder = new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(httpEndpoint);
    assertThrows(IllegalArgumentException.class, builder::buildFileAsyncClient);
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source (missing its closing method reference and quote); verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
// Round-trips a random local file through uploadFromFile/readToFile and compares contents.
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fac.uploadFromFile(file.getPath(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
        .verifyComplete();
    File outFile = new File(file.getPath() + "result");
    assertTrue(outFile.createNewFile());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    StepVerifier.create(fac.readToFile(outFile.getPath(), true))
        .expectNextCount(1)
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// (file size, optional block size) pairs; a null block size exercises default chunking.
private static Stream<Arguments> uploadFromFileSupplier() {
    long fourMb = 4L * 1024 * 1024;
    return Arrays.asList(
            Arguments.of(10, null),
            Arguments.of(10 * Constants.KB, null),
            Arguments.of(50 * Constants.MB, null),
            Arguments.of(101 * Constants.MB, fourMb))
        .stream();
}
// Metadata supplied to uploadFromFile must be persisted on the path, and the uploaded
// bytes must match the local file exactly.
@Test
public void uploadFromFileWithMetadata() throws IOException {
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Files.readAllBytes throws checked IOException; rethrow unchecked inside the lambda.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// Without an explicit overwrite flag, uploadFromFile must refuse to replace an existing
// file. fc is presumed to point at an already-created file — confirm against test setup.
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// With overwrite=true, uploadFromFile must succeed against an existing file.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
        .verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Most recent value passed to reportProgress; the tests assert it equals the total
    // number of bytes uploaded.
    private long reportedByteCount;

    @Override
    public void reportProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
private static final class FileUploadListener implements ProgressListener {
    // Most recent value passed to handleProgress; the tests assert it equals the total
    // number of bytes uploaded.
    private long reportedByteCount;

    @Override
    public void handleProgress(long bytesTransferred) {
        reportedByteCount = bytesTransferred;
    }

    long getReportedByteCount() {
        return reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
// The deprecated ProgressReceiver must see the full byte count by the end of the upload.
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadReporter.getReportedByteCount());
}
// (total size, block size, concurrency) combinations for progress-reporting uploads.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Arrays.asList(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100))
        .stream();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
// The ProgressListener must see the full byte count by the end of the upload.
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) upload path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadListener.getReportedByteCount());
}
// uploadFromFile with explicit block/single-upload sizes must persist a file of the
// expected total size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// (dataSize, singleUploadSize, blockSize) with data larger than the single-shot limit,
// so the chunked path is taken; blockSize null exercises the default block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Arrays.asList(
            Arguments.of(100, 50L, null),
            Arguments.of(100, 50L, 20L))
        .stream();
}
// uploadFromFileWithResponse must return 200 with ETag/Last-Modified populated, and the
// resulting remote file must have the expected size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // Fix: the original chain ended at assertNext(...) with no terminal verify step, so the
    // StepVerifier never subscribed and the file-size assertion never actually executed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    // Uploading a single empty buffer without the overwrite flag must be rejected.
    DataLakeFileAsyncClient target = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Flux<ByteBuffer> emptySource = Flux.just(ByteBuffer.wrap(new byte[0]));
    StepVerifier.create(target.upload(emptySource, null)).verifyError(DataLakeStorageException.class);
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
// Empty buffers interleaved in the source flux must be skipped without corrupting the
// concatenated upload; the downloaded bytes must equal expectedDownload.
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
    byte[] expectedDownload) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
        null, true))
        .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
        .verifyComplete();
}
// Three-buffer sequences with an empty buffer substituted at each position, paired with
// the expected concatenation after empty chunks are dropped.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    byte[] hello = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] space = " ".getBytes(StandardCharsets.UTF_8);
    byte[] world = "world!".getBytes(StandardCharsets.UTF_8);
    ByteBuffer empty = ByteBuffer.allocate(0);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), ByteBuffer.wrap(space), empty,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(hello), empty, ByteBuffer.wrap(world),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(empty, ByteBuffer.wrap(space), ByteBuffer.wrap(world),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
// Multi-block buffered upload round-trip: upload random data with the given block size
// and concurrency, then verify the downloaded bytes (skipped for very large payloads).
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    // Only verify content below 100 MB — presumably to keep test time bounded; confirm intent.
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
// (total size, block size, concurrency) matrix for multi-block buffered uploads.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Arrays.asList(
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
            Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
            Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
            Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3))
        .stream();
}
// Walks 'result' in lockstep with each expected buffer, comparing slice by slice, then
// asserts no unexpected trailing bytes remain. Rewinds every buffer before comparing.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer expected : buffers) {
        expected.position(0);
        int sliceLength = expected.remaining();
        // Restrict the comparison window of 'result' to exactly this buffer's bytes.
        result.limit(result.position() + sliceLength);
        TestUtils.assertByteBuffersEqual(expected, result);
        result.position(result.position() + sliceLength);
    }
    assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    // Number of progress callbacks observed; read directly by the tests.
    private long reportingCount;

    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Every report is expected to land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
private static final class Listener implements ProgressListener {
    private final long blockSize;
    // Number of progress callbacks observed; read directly by the tests.
    private long reportingCount;

    Listener(long blockSize) {
        this.blockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        // Every report is expected to land on a block boundary.
        assert bytesTransferred % blockSize == 0;
        reportingCount++;
    }
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// The deprecated ProgressReceiver must fire at least once per uploaded block.
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// (total size, block size, concurrency) combinations for buffered-upload progress tests.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Arrays.asList(
            Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
            Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
            Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
            Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20))
        .stream();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
// The ProgressListener must fire at least once per uploaded block.
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
// A source flux emitting buffers of varying sizes (in MB) must be re-chunked to the
// configured block size and round-trip byte-for-byte.
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Source-buffer size lists (in MB) with a 10 MB block size and concurrency 2: sizes
// below, at, and well above the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Arrays.asList(
            Arguments.of(Arrays.asList(7, 7), 10L, 2),
            Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
            Arguments.of(Arrays.asList(10, 10), 10L, 2),
            Arguments.of(Arrays.asList(50, 51, 49), 10L, 2))
        .stream();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Payloads around the single-shot threshold must take the correct upload path and still
// round-trip byte-for-byte.
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
// Same as bufferedUploadHandlePathing, but with a hot (publish().autoConnect()) source.
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Buffer-size lists around the 4 MB single-shot threshold: small, just over, exactly at
// (twice), and a single at-threshold buffer.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    int fourMb = 4 * Constants.MB;
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(fourMb + 1, 10),
        Arrays.asList(fourMb, fourMb),
        Collections.singletonList(fourMb));
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
// A hot source plus injected transient HTTP failures must still complete via retries and
// round-trip byte-for-byte.
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // Read back through a clean client (no failure injection) to verify what was persisted.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Buffer-size lists around the 4 MB single-shot threshold for the transient-failure test.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    int fourMb = 4 * Constants.MB;
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(fourMb + 1, 10),
        Arrays.asList(fourMb, fourMb));
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
// Stream-based uploadWithResponse must survive injected transient failures (retries) for
// both the single-shot and chunked code paths, then round-trip byte-for-byte.
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
// A null source flux must be rejected with NullPointerException before any I/O happens.
@Test
public void bufferedUploadIllegalArgumentsNull() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
// HTTP headers (including an optional client-computed Content-MD5) supplied at upload
// time must be persisted on the path; a null content type defaults to octet-stream.
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// (dataSize, cacheControl, contentDisposition, contentEncoding, contentLanguage,
// validateContentMD5, contentType) — small and multi-block payloads, with and without headers.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
            Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
            Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
            Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
            Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"))
        .stream();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Metadata supplied at upload time must be persisted exactly (empty map when all null).
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    // Small block size with data of the same size keeps this a fast single-block upload.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
// Counts the append calls issued for a given data/block/single-upload size combination by
// intercepting appendWithResponse in an anonymous subclass, and checks the final file size.
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Hand-rolled spy: delegate to the real append while counting invocations.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
// POSIX permissions and umask supplied via FileParallelUploadOptions must be accepted and
// the upload must produce a file of the expected size.
@Test
public void bufferedUploadPermissionsAndUmask() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Buffered upload succeeds (HTTP 200) when every access condition is satisfiable.
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // setupPath* helpers resolve sentinel inputs into the file's real lease ID / ETag.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Buffered upload must fail with 412 (precondition failed) when a condition is unmet:
// the real ETag is placed in If-None-Match so the match is guaranteed to trip.
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
// A garbage lease ID must fail the multi-buffer upload without deadlocking the buffer pool.
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // A real lease is taken, then a bogus one is sent with the request — guaranteed mismatch.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source; verify against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Without the overwrite flag, a second buffered upload to the same path must be rejected.
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
// A non-markable (non-replayable) file-backed flux must still upload correctly and
// round-trip byte-for-byte through readToFile.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
// An InputStream upload without a declared length must succeed and persist the exact bytes.
@Test
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Any declared length that disagrees with the actual stream size must fail the upload.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    FileParallelUploadOptions badOptions = new FileParallelUploadOptions(DATA.getDefaultInputStream(), length);
    assertThrows(Exception.class, () -> fc.uploadWithResponse(badOptions).block());
}
// Zero, negative, one-short, and one-long relative to the true data size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actual = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actual - 1, actual + 1);
}
// An upload through a pipeline that injects transient failures must succeed via retries
// and persist the exact bytes.
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// A BinaryData-based upload must succeed and persist the exact bytes.
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// upload(BinaryData, ..., overwrite=true) on an existing file must replace its content.
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
// Service versions from 2021-04-10 support an encryption context; it must be persisted
// and returned on getProperties.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload matching the given delimited serialization (separator characters,
// optional header row), repeats the two fixed data rows numCopies times, and commits the
// whole payload to fc via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    // Header row: rn1<sep>rn2<sep>rn3<sep>rn4 terminated by the record separator.
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    byte[] headers = header.getBytes();
    // Two fixed data rows per copy: 100,200,300,400 and 300,400,500,600.
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes();
    // Reserve room for the header only when the serialization declares headers present.
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Lay the repeated data rows down immediately after the (optional) header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    // Create (overwrite), append at offset 0, then flush to commit the full payload.
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
/**
 * Uploads a small JSON-like document to {@code fc}: an object with one
 * {@code "nameN": "ownerN"} line per copy, committed via create/append/flush.
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder payload = new StringBuilder("{\n");
    for (int index = 0; index < numCopies; index++) {
        payload.append(String.format("\t\"name%d\": \"owner%d\",\n", index, index));
    }
    payload.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(payload.toString()), 0).block();
    fc.flush(payload.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    32
})
// Minimal query-path test: SELECT * over plain CSV must return exactly the file's bytes.
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    // Baseline: the raw file contents read back directly.
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate the streamed query result into a single byte array.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        // Consistency fix: use the simple nested-class name instead of the fully-qualified form
        // used nowhere else in this file.
        MockProgressReceiver mockReceiver = new MockProgressReceiver();
        FileQueryOptions options =
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        // The progress callbacks must at some point report the full blob size scanned.
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
// An unsupported FileQuerySerialization implementation on either the input or the output
// side must be rejected client-side with IllegalArgumentException.
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    // Cleanup: removed a stale commented-out StepVerifier variant of the same assertion.
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Querying a path that was never created must surface a storage error.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() ->
        StepVerifier.create(fc.query(expression)).verifyError(DataLakeStorageException.class));
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the given scenario, retrying up to 5 times in live mode to smooth over transient
 * service failures. In playback/record mode the scenario runs exactly once with no retry.
 * <p>
 * BUG FIX: previously every failure was swallowed, so a scenario that failed all 5 attempts
 * returned normally and the test passed silently. The last failure is now rethrown.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    int retry = 0;
    while (retry < 5) {
        try {
            runnable.run();
            break;
        } catch (RuntimeException ex) {
            retry++;
            if (retry == 5) {
                throw ex; // retries exhausted - surface the real failure
            }
            sleepIfRunningAgainstService(5000);
        }
    }
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// True when the targeted service version predates 2019-12-12; used by @DisabledIf to skip
// tests for features introduced in that version (query, scheduled deletion).
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
/**
 * Test helper that records every bytes-scanned value delivered through query progress
 * callbacks so tests can assert on the progression afterwards.
 */
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    // Ordered record of each callback's bytesScanned value; read directly by tests.
    List<Long> progressList = new ArrayList<>();
    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
/**
 * Test helper that asserts every reported query error is non-fatal and of the expected
 * type, while counting how many errors were delivered.
 */
static class MockErrorReceiver implements Consumer<FileQueryError> {
    // Error name each callback is expected to carry.
    String expectedType;
    // Number of errors received; read directly by tests after the query completes.
    int numErrors;
    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }
    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
    // Overwrite the existing file, then read it back and compare against the source bytes.
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
// The upload response carries path info; at minimum the ETag must be populated.
public void uploadReturnValue() {
    assertNotNull(fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
        .getValue().getETag());
}
@Test
// A per-call version policy pins the x-ms-version header on each request; both calls
// should therefore report 2019-02-02 regardless of the client's default service version.
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
// Each test starts with a freshly created file; blocking is acceptable in test setup.
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
// Best-effort deletion of any local files recorded in createdFiles; failures are ignored.
public void cleanup() {
    createdFiles.forEach(File::delete);
}
@Test
// Creating a brand-new path must emit a non-null result.
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        // Idiom fix: assertNotNull instead of assertNotEquals(null, r).
        .assertNext(r -> assertNotNull(r))
        .verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
// With no owner/group specified, the service assigns "$superuser" to both.
public void createOptionsWithNullOwnerAndGroup() {
    // BUG FIX: the Mono returned by createWithResponse was never subscribed, so the create
    // call never actually executed before the assertions. block() forces the subscription.
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// HTTP headers supplied at create time should be accepted (201); null contentType is expected
// to default to application/octet-stream per the CSV expectations.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied at create time should be present (as a subset) in getProperties().
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// permissions 0777 masked by umask 0057 should yield effective permissions rwx-w----.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Proposed lease id together with a lease duration is accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id WITHOUT a lease duration must be rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A 15s fixed lease acquired at create time should surface as LOCKED/LEASED/FIXED in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Both an absolute expiry time and a null deletion-options object are accepted at create time.
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an expiry one day out, plus null to exercise the "no deletion options" path.
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
// A relative time-to-expire of 6 days should produce expiresOn == creationTime + 6 days
// (compared with reduced precision to tolerate clock granularity).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// createIfNotExists on a fresh path should create it; exists() then reports true.
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// Default options: 201 Created plus the standard response headers (ETag, Last-Modified, ...).
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Second createIfNotExists on the same path must not overwrite: first call 201, second 409.
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
// Smoke test: after createIfNotExists the file exists.
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
// createIfNotExists with HTTP headers: null contentType is expected to fall back to the
// service default "application/octet-stream"; other header values must round-trip.
// FIX: added nullValues = "null" — every sibling @CsvSource in this file declares it, and
// without it JUnit passes the literal string "null" for each parameter, so the
// `contentType == null` fallback below could never trigger and "null" was sent as a header.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// Service default when no content type was supplied.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
// Metadata supplied to createIfNotExists should equal getProperties().getMetadata() exactly.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// NOTE(review): siblings pass null as the second argument here rather than Context.NONE — confirm intended overload.
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Permissions + umask on createIfNotExists are accepted (201).
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
// Encryption context set at create time should surface in getProperties(), read responses,
// and listPaths entries (the expectNextCount(1) skips the directory created above).
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
// ACL supplied to createIfNotExists should round-trip through getAccessControl().
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner/group supplied to createIfNotExists should round-trip through getAccessControl().
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Explicit null owner/group falls back to "$superuser" for both.
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
// HTTP headers supplied via options are accepted (201).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Metadata supplied via options should be present (as a subset) in getProperties().
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// permissions 0777 masked by umask 0057 should yield effective permissions rwx-w----.
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Proposed lease id + duration is accepted (201).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Proposed lease id WITHOUT a duration must be rejected by the service.
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A 15s fixed lease acquired via createIfNotExists should surface as LOCKED/LEASED/FIXED.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// Absolute expiry (or null deletion options) is accepted (201).
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
// Relative 6-day expiry should produce expiresOn == creationTime + 6 days (precision-tolerant compare).
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
// Deleting the pre-created file succeeds with 200.
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
// After delete, getProperties must fail with 404 BlobNotFound.
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when all access conditions (lease, ETag match, dates) are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete fails when any access condition is deliberately violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// deleteIfExists on an existing file emits true.
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
// deleteIfExistsWithResponse on an existing file returns 200.
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
// After deleteIfExists, getProperties must fail.
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
// Second deleteIfExists on an already-deleted file returns 404 instead of throwing.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists fails when any access condition is deliberately violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// setPermissions returns a PathInfo whose ETag and Last-Modified are populated.
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setPermissionsWithResponse returns 200.
@Test
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// setPermissions succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions fails when any access condition is deliberately violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setPermissions on a non-existent file fails with DataLakeStorageException.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList returns a PathInfo whose ETag and Last-Modified are populated.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// setAccessControlListWithResponse returns 200.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList fails when any access condition is deliberately violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// setAccessControlList on a non-existent file fails with DataLakeStorageException.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: true when the live service is older than the 2020-02-10 version.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: exactly 1 file changed, 0 directories, 0 failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file: same counter expectations as setACLRecursive.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal of mask/default/user/group entries on a single file.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl returns a fully populated result (ACL, permissions, owner, group).
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// getAccessControlWithResponse (no UPN translation) returns 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// getAccessControlWithResponse with UPN translation enabled returns 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails when an access condition is violated. The garbage-lease-id row is
// skipped explicitly — lease conditions are not evaluated for this read operation.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustive check of default property values on a freshly created file: standard headers,
// unset content headers, unlocked/available lease, no copy state, HOT tier, no metadata.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// getPropertiesWithResponse returns 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds (200) when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails when an access condition is violated.
// NOTE(review): unlike the other *ACFail tests, this one feeds the result of
// setupPathLeaseCondition into setLeaseId instead of using leaseID directly — confirm intended.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// getProperties on a non-existent file fails with a BlobNotFound message.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// setHttpHeaders(null) clears headers and returns 200 with standard response headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Rebuilds the current headers with contentType replaced by "type" and verifies the change sticks.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
// Uploads the default test payload, sets the supplied HTTP headers, and verifies they all
// round-trip through getPropertiesWithResponse.
// FIX: append() and flush() previously returned Monos that were never subscribed
// (no .block()), so no data was ever uploaded before the headers were set; block() on each
// actually performs the upload, matching the .block() already used on setHttpHeaders below.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
fc.setHttpHeaders(putHeaders).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
contentMD5, contentType))
.verifyComplete();
}
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
return Stream.of(
Arguments.of(null, null, null, null, null, null),
Arguments.of("control", "disposition", "encoding", "language",
Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type")
);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting headers on a path that was never created surfaces a storage error.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// Minimal setMetadata round-trip: write one entry and read it back via getProperties.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata with zero or two key/value pairs returns the expected status code and
// the metadata round-trips through getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setMetadata on a path that was never created surfaces a storage error.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// Reads the whole file with all-default options and checks both the returned bytes
// and the full expected header surface (present, absent, and fixed-value headers).
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
// No metadata was set, so no x-ms-meta-* header may appear.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
}
// Reading a freshly created zero-length file yields an empty buffer.
// NOTE(review): uses the literal path "emptyFile" rather than generatePathName();
// could collide across runs sharing a file system — confirm this is intentional.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Uses MockRetryRangeResponsePolicy to assert the retry re-request carries the
// expected "bytes=2-6" range header; the injected failure surfaces as IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.verifyError(IOException.class);
}
// Minimal read round-trip: upload the default payload and read it back unchanged.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Downloads a sub-range of the uploaded default payload and verifies the returned
// bytes match the expected slice; a null count means "from offset to end of file".
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Fixed: removed an unused local ByteArrayOutputStream ("readData") that was
    // declared here but never written to or read.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Ranges exercised by readRange: the full payload, a 5-byte prefix, and a 2-byte
// interior slice starting at offset 3.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// read succeeds (200) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails when any supplied access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting content-MD5 on a ranged read returns the Base64 MD5 of exactly the
// requested 3-byte range, not of the whole file.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
// MD5 is a required JDK algorithm; rethrow unchecked to satisfy the lambda.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Verifies readToFile without the overwrite flag fails with
// FileAlreadyExistsException (wrapped in UncheckedIOException) when the target exists.
// Fixed: (1) the @Test annotation was duplicated — @Test is not @Repeatable, so a
// second occurrence is a compile error; (2) the async append/flush Monos were built
// but never subscribed — added .block() to match every sibling test.
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// With overwrite=true, readToFile succeeds even though the target file already exists,
// and the downloaded content matches the uploaded payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFile creates the target when it does not exist and writes the full payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// Explicit OpenOptions (CREATE/READ/WRITE) allow downloading into a pre-existing file
// without the overwrite flag; content is verified afterwards.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
// Rethrow unchecked to satisfy the lambda; the test will fail with the cause.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// CREATE + TRUNCATE_EXISTING OpenOptions overwrite an existing target file in place.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File sizes for the parallel-download tests: tiny, block-aligned, a deliberately
// odd size (note 1026, not 1024 — exercises a non-power-of-two chunk boundary),
// and a large 50 MB payload.
private static Stream<Integer> downloadFileSupplier() {
return Stream.of(
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads only the given range to a local file and compares that slice of the
// original upload with the downloaded bytes.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges exercised by downloadFileRange: full payload, shifted-by-one, a small
// interior slice, truncated-by-one, and a count larger than the content.
private static Stream<FileRange> downloadFileRangeSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, size),
        new FileRange(1, size - 1),
        new FileRange(3, 2L),
        new FileRange(0, size - 1),
        new FileRange(0, 10 * 1024L));
}
// A range starting past the end of the file fails with a storage error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A FileRange with offset 0 and no count downloads the entire file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFileWithResponse succeeds when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFileWithResponse fails with ConditionNotMet or LeaseIdMismatch when an
// access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
// Either error code is acceptable depending on which condition trips first.
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported byte count
// so tests can assert on the progress sequence.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every progress callback value.
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// Minimal rename within the same file system returns 201 Created.
@Test
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
// After rename, the destination client works (200 on getProperties) and the source
// path no longer resolves.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming across file systems moves the path to the new file system; the original
// path no longer resolves afterwards.
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming a path that was never created surfaces a storage error.
@Test
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename handles percent-encoded characters in source and/or destination names.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source)
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
// Rename succeeds (201) when the SOURCE access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when the SOURCE access conditions are violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds (201) when the DESTINATION access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename fails when the DESTINATION access conditions are violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Rename works when the client authenticates with a file-system SAS token granting
// the required move/write/create/delete permissions.
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken but the SAS string carries a leading "?", which the client
// must tolerate.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Minimal append: the default payload at offset 0 completes without error.
@Test
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse returns 202 with request-id, version, date, and
// server-encrypted headers populated.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a client-computed MD5 of the payload is accepted (202).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Invalid flux/size combinations fail with the exception type the supplier declares.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Invalid (flux, declared-size) combinations for append and the exception each
// produces: null body, size too large by one, size too small by one.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        Arguments.of(null, size, NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), size + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), size - 1, UnexpectedLengthException.class));
}
// Appending a zero-length body is rejected by the service.
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
// A null body fails client-side with NullPointerException before any request is sent.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// Append succeeds (202) when the correct active lease id is supplied.
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with the wrong lease id fails with 412 Precondition Failed.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Condition method for @DisabledIf: true when the targeted service version predates
// 2020-08-04 (lease-action support).
private static boolean olderThan20200804ServiceVersion() {
    boolean predates = olderThan(DataLakeServiceVersion.V2020_08_04);
    return predates;
}
// The ACQUIRE lease action on append takes out a 15-second fixed lease in one call;
// verified via the lease status/state/duration on getProperties.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// The AUTO_RENEW lease action renews an already-held lease during append; the lease
// remains locked/leased/fixed afterwards.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE (with flush=true) releases the lease as part of the append; lease ends UNLOCKED.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE acquires a lease for the operation then releases it on completion.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Append to a path that was never created fails with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// Injects a transient HTTP failure; the pipeline must retry and the full payload still land intact.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// append with flush=true commits data in one call; the content is immediately readable.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Minimal BinaryData append overload smoke test — must not throw.
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse(BinaryData) returns 202 with the standard response headers populated.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// BinaryData append with flush=true: same 202 + header checks as appendDataFlush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Minimal flush path: append then flush(length, overwrite=true) must succeed.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// flushWithResponse(retainUncommittedData=false, close=true) — close flag path must succeed.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// flushWithResponse(retainUncommittedData=true, close=false) path must succeed.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing with a length (4) that does not match the appended data is a service error.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time must be persisted and returned by getProperties.
// A null content type falls back to the service default "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all matching access conditions (lease, ETag, modified times) are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush must fail with DataLakeStorageException when any access condition is violated.
// Fix: use getDefaultDataSizeLong() for the flush length, consistent with flushAC and the
// other flush tests in this file (the int overload widened silently; same value either way).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
null, drc))
.verifyError(DataLakeStorageException.class);
}
// Flushing a never-created path fails with a service exception.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// Second flush with overwrite=false must fail because data was already committed.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Client construction must URL-decode/normalize special characters in file names.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Builder must reject token credentials over plain http (bearer tokens require TLS).
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File-size / block-size combinations for uploadFromFile: tiny, small, large with default
// chunking, and >100MB forcing 4MB blocks.
private static Stream<Arguments> uploadFromFileSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024));
    return cases.stream();
}
// Metadata supplied at upload time must round-trip through getProperties, and the file
// content must match the local source byte-for-byte.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Default uploadFromFile overload does not overwrite: both existing targets must fail.
@Test
public void uploadFromFileDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
.verifyError(DataLakeStorageException.class);
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString()))
.verifyError(DataLakeStorageException.class);
}
// uploadFromFile(path, overwrite=true) must succeed against an existing file.
@Test
public void uploadFromFileOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Most recent progress value; overwritten (not summed) on each callback. Tests assert the
// final value equals the total upload size, implying callbacks report cumulative bytes.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// Non-deprecated counterpart of FileUploadReporter using the ProgressListener API.
private static final class FileUploadListener implements ProgressListener {
// Most recent progress value; overwritten on each callback.
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// size / block size / concurrency combinations exercising single-shot and chunked uploads.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
    return cases.stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Upload with explicit single-upload/block sizes; resulting file size must equal the input size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// dataSize exceeds singleUploadSize in both cases, so the chunked path is always taken;
// the second case additionally pins an explicit block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L));
    return cases.stream();
}
// uploadFromFileWithResponse must return 200 with ETag/lastModified, and persist the full size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Places an empty buffer at each position (none / last / middle / first) and states the
// expected concatenation for each arrangement.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    ByteBuffer space1 = ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8));
    ByteBuffer space2 = ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8));
    ByteBuffer space3 = ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8));
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(helloBytes), space1, ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), space2, emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, space3, ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// dataSize / block size / concurrency matrix for buffered upload.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));
    return cases.stream();
}
// Verifies that `result` is exactly the concatenation of `buffers`: for each source buffer,
// a window of `result` (set via limit) is compared against it, then the window is advanced.
// NOTE(review): the post-assert position arithmetic relies on assertByteBuffersEqual not
// consuming either buffer's position — confirm against TestUtils before restructuring.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
// Window [position, position + buffer.remaining()) of result is compared to this buffer.
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
result.position(result.position() + buffer.remaining());
}
// No unmatched trailing bytes may remain.
assertEquals(0, result.remaining());
}
// Deprecated ProgressReceiver that counts callbacks; each report must land on a block boundary.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
// Number of progress callbacks observed.
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
// Progress is expected only at whole-block increments.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Non-deprecated counterpart of Reporter using the ProgressListener API.
private static final class Listener implements ProgressListener {
private final long blockSize;
// Number of progress callbacks observed.
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
// Progress is expected only at whole-block increments.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// size / block size / concurrency combinations for progress-reporting buffered uploads.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20));
    return cases.stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Buffer-size lists (in MB) chosen so chunks are smaller than, equal to, and larger than the
// 10MB block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    List<Arguments> cases = Arrays.asList(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
    return cases.stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Size lists straddling the 4MB single-shot threshold: below, just above, exactly at, and a
// single buffer exactly at the boundary.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB), Collections.singletonList(4 * Constants.MB));
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same boundary cases as bufferedUploadHandlePathingSupplier, minus the single-buffer case.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected client-side with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Header combinations over small (single-shot) and 6MB (chunked) payloads, with and without MD5.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
return Stream.of(
Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Verifies that uploadFromFile(..., overwrite=true) succeeds against a path that has
// already been written to.
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
// NOTE(review): the first upload targets fc while the overwrite targets fac — confirm
// this mix of clients is intentional (one would expect both uploads on the same client
// for a true overwrite test).
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
// Register the second temp file for cleanup too; previously it was created inline and
// never added to createdFiles / deleteOnExit, leaking a file per test run.
File overwriteFile = getRandomFile(50);
overwriteFile.deleteOnExit();
createdFiles.add(overwriteFile);
StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
.verifyComplete();
}
@Test
// Uploads from a Flux backed by an AsynchronousFileChannel (a non-replayable, non-markable
// source), reads the path back into a second local file, and compares the two files
// byte-for-byte.
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
// NOTE(review): the AsynchronousFileChannel opened here is never closed explicitly —
// the test relies on process exit for cleanup; consider closing it once the upload
// completes.
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload using the separators configured on the given serialization and
// uploads it to fc (create + append + flush). The payload is an optional 16-byte header
// row ("rn1..rn4") followed by numCopies copies of a fixed two-record CSV body.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
byte[] headers = header.getBytes();
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes();
// Header bytes are only prepended when the serialization declares headers present.
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
// Tile the CSV body numCopies times after the (optional) header.
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Uploads a small JSON-like object with numCopies "nameN": "ownerN" entries to fc.
// NOTE(review): every entry, including the last, ends with a trailing comma, so the
// payload is not strictly valid JSON — presumably intentional because the query service
// is lenient; confirm before "fixing".
private void uploadSmallJson(int numCopies) {
StringBuilder b = new StringBuilder();
b.append("{\n");
for (int i = 0; i < numCopies; i++) {
b.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
}
b.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(b.toString()), 0).block();
fc.flush(b.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
// Round-trips CSV content of varying sizes through query() and verifies the queried bytes
// exactly match a plain read() of the same file.
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
// Copy only the readable window [position, limit). The previous code wrote
// piece.array(), which exposes the entire backing array regardless of
// position/limit and can append stale bytes when the buffer is a slice or is
// only partially filled.
byte[] chunk = new byte[piece.remaining()];
piece.get(chunk);
// ByteArrayOutputStream.write(byte[], int, int) does not declare IOException,
// so no try/catch wrapper is needed here.
outputStream.write(chunk, 0, chunk.length);
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// Verifies the progress consumer is invoked during a query and that a final progress
// callback reporting the full file size is delivered.
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Upload with '.' separators, then query with the same base serialization below.
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
// Drain the full response body so all progress callbacks fire.
fc.queryWithResponse(options).block().getValue().blockLast();
// The terminal progress event should report the whole file as scanned.
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
// Runs a test scenario directly in playback/record mode; in live mode retries it up to
// five times with a pause between attempts to absorb transient service flakiness.
// Previously a scenario that failed on every attempt was silently swallowed, letting a
// broken live test pass; the last failure is now rethrown once retries are exhausted.
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
RuntimeException lastFailure = null;
for (int retry = 0; retry < 5; retry++) {
try {
runnable.run();
return;
} catch (RuntimeException ex) {
// Runnable.run cannot throw checked exceptions, so RuntimeException is the
// broadest useful catch; AssertionError still propagates immediately, as before.
lastFailure = ex;
sleepIfRunningAgainstService(5000);
}
}
throw lastFailure;
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// @DisabledIf guard: true when the targeted service version predates 2019-12-12 (the
// version that introduced the quick-query and schedule-deletion APIs exercised here).
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Test double that records every bytesScanned value delivered through query progress
// callbacks so tests can assert on the sequence afterwards.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
// Collected progress values in callback order; read directly by tests. Made final —
// the reference is never reassigned. NOTE(review): assumes callbacks are delivered
// sequentially from a single Reactor thread — confirm before asserting strict order.
final List<Long> progressList = new ArrayList<>();

@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Test double that counts non-fatal query errors of one expected type. Any fatal error or
// an error of a different type fails the test immediately via the assertions in accept().
static class MockErrorReceiver implements Consumer<FileQueryError> {
// The only error name this receiver tolerates; set once, so final.
private final String expectedType;
// Number of matching errors seen; read directly by tests after the query completes.
// (Explicit zero-initialization removed — int fields default to 0.)
int numErrors;

MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
}

@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: dataSize, singleUploadSize, blockSize, expected number of append calls.
// Null sizes fall back to client defaults (100 MB single-upload cutoff, 4 MB blocks).
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
// Just under the default single-upload cutoff: one append.
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
// Just over the cutoff: chunked into default 4 MB appends.
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
// Fits within the explicit 50-byte single-upload limit: one append.
Arguments.of(100, 50L, null, 1),
// Exceeds the single-upload limit; 100 bytes / 20-byte blocks = 5 appends.
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} |
I'd move this to outside of a StepVerifier call chain and in this assertNext just verify a response was returned and then call this after the StepVerifier call | public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
} | assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8)); | public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()));
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void deleteMin() {
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
@Test
public void deleteIfExistsMin() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
// Expects 200 when setting POSIX permissions with explicit group/owner and no request conditions.
public void setPermissionsWithResponse() {
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
// Access-condition matrix: every supplied condition is satisfied, so the call succeeds with 200.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// Negative access-condition matrix: at least one condition is unmet, so the service rejects the call.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// Setting permissions on a path that was never created must surface a storage error.
@Test
public void setPermissionsError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
// Happy-path ACL update; the response carries a fresh ETag and last-modified timestamp.
@Test
public void setACLMin() {
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
// Expects 200 from the WithResponse ACL overload with no request conditions.
@Test
public void setACLWithResponse() {
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// ACL update succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// ACL update fails when an access condition is deliberately unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
// ACL update on a nonexistent path must surface a storage error.
@Test
public void setACLError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard used by @DisabledIf: recursive-ACL APIs require service version 2020-02-10 or newer.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
// Recursive ACL set on a single file: exactly one file changed, nothing failed.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL update on a single file: counters mirror the set-recursive case.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// Recursive ACL removal: parses a removal list (mask/default/user/group entries) then applies it.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
// getAccessControl returns a fully-populated result for an existing file.
@Test
public void getAccessControlMin() {
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
// Plain WithResponse overload (no UPN translation) returns 200.
@Test
public void getAccessControlWithResponse() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
// Requesting user-principal-name translation (first arg true) also returns 200.
@Test
public void getAccessControlReturnUpn() {
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
// getAccessControl succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl fails on unmet conditions; the garbage-lease row is skipped because
// this operation does not validate lease ids the same way (see early return).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustive check of default property/header values on a freshly-created file.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
// Minimal smoke test: property fetch returns 200.
@Test
public void getPropertiesMin() {
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties fails on unmet access conditions.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
// Nonexistent path: the error message should identify the underlying BlobNotFound code.
@Test
public void getPropertiesError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(ex.getMessage().contains("BlobNotFound"));
});
}
// Passing null headers is accepted and returns 200 with valid standard headers.
@Test
public void setHTTPHeadersNull() {
StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// Round-trip: overwrite only content-type (others copied from current properties) and verify it sticks.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
/*
 * Sets the full matrix of HTTP headers on the file and verifies they round-trip
 * through getProperties.
 *
 * Fix: the original called fc.append(...) and fc.flush(...) WITHOUT block().
 * Those methods return cold Monos; unsubscribed they never execute, so the file
 * content was never written. Every sibling test subscribes via block().
 */
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
/*
 * Parameter source for setHTTPHeadersHeaders: one all-null row (clear every header)
 * and one fully-populated row whose MD5 is the Base64-encoded digest of the default data.
 */
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] defaultDataMd5 =
        Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", defaultDataMd5, "type"));
}
// setHttpHeaders succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders fails on unmet access conditions.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setHttpHeaders on a nonexistent path surfaces a storage error.
@Test
public void setHTTPHeadersError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setHttpHeaders(null))
.verifyError(DataLakeStorageException.class);
}
// Metadata round-trip with a single key/value pair.
@Test
public void setMetadataMin() {
Map<String, String> metadata = Collections.singletonMap("foo", "bar");
fc.setMetadata(metadata).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Metadata round-trip with zero, one, or two pairs; null CSV cells mean "omit this pair".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata fails on unmet access conditions.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
// setMetadata on a nonexistent path surfaces a storage error.
@Test
public void setMetadataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setMetadata(null))
.verifyError(DataLakeStorageException.class);
}
// Full read with default options: verifies content plus the complete default header set.
// NOTE(review): assertions inside r.getValue().subscribe(...) run in a subscriber callback;
// a failure there may not propagate to the StepVerifier — confirm this is intentional.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
// Reading a zero-length file yields an empty buffer.
@Test
public void readEmptyFile() {
fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
StepVerifier.create(fc.read())
.assertNext(r -> assertEquals(0, r.array().length))
.verifyComplete();
}
// Retry-range policy forces a mid-stream failure; the retried range must be "bytes=2-6"
// and exhausting retries surfaces an IOException to the value Flux.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
}
// Minimal read: collected bytes equal the uploaded default payload.
@Test
public void readMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Ranged reads: null count means "to end of file".
// NOTE(review): as in readAllNull, the comparison runs inside subscribe(...) — failures
// there may not fail the StepVerifier.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Ranges paired with the substring of the default payload they should produce.
private static Stream<Arguments> readRangeSupplier() {
return Stream.of(
Arguments.of(0L, null, DATA.getDefaultText()),
Arguments.of(0L, 5L, DATA.getDefaultText().substring(0, 5)),
Arguments.of(3L, 2L, DATA.getDefaultText().substring(3, 3 + 2))
);
}
// read succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails on unmet access conditions.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting a transactional MD5 on a small range: Content-MD5 matches the digest of bytes [0,3).
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Injects 5 transient failures; the default retry policy still delivers the full payload.
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
/*
 * readToFile without overwrite against an existing local file must fail with
 * FileAlreadyExistsException (wrapped in UncheckedIOException).
 *
 * Fix: the original called fc.append(...) and fc.flush(...) WITHOUT block();
 * the returned Monos were never subscribed, so the upload never happened.
 */
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
/*
 * readToFile with overwrite=true succeeds when the local target file is absent
 * and writes the full payload.
 *
 * Fix: the source carried a duplicated @Test annotation; @Test is not
 * @Repeatable, so the duplicate is a compile error. One annotation removed.
 */
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// readToFile with CREATE/READ/WRITE open options (no TRUNCATE).
// NOTE(review): despite the name, this test creates the target file first — it appears
// identical in setup to downloadFileExistOpenOptions below; confirm the intended difference.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile over an existing file with TRUNCATE_EXISTING: existing content is replaced.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
}
/*
 * File sizes exercised by downloadFile: tiny, exact multi-block, a deliberately
 * odd not-block-aligned size, and a large multi-megabyte payload.
 */
private static Stream<Integer> downloadFileSupplier() {
    int tiny = 20;
    int exactBlocks = 16 * 1024 * 1024;
    int unaligned = 8 * 1026 * 1024 + 10;
    int large = 50 * Constants.MB;
    return Stream.of(tiny, exactBlocks, unaligned, large);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Ranged readToFile: the downloaded slice must match the same slice of the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges: full file, skip-first-byte, tiny interior slice, all-but-last-byte,
// and a count far beyond EOF (service truncates to actual size).
private static Stream<FileRange> downloadFileRangeSupplier() {
return Stream.of(
new FileRange(0, DATA.getDefaultDataSizeLong()),
new FileRange(1, DATA.getDefaultDataSizeLong() - 1),
new FileRange(3, 2L),
new FileRange(0, DATA.getDefaultDataSizeLong() - 1),
new FileRange(0, 10 * 1024L)
);
}
// A range starting past EOF is rejected by the service.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A range with a null count downloads from the offset to the end of the file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFile succeeds when all access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFile fails on unmet conditions; lease mismatches surface a distinct error code.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported byte count
// so tests can assert on the progress sequence (read via the package-private field).
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
// Same contract as downloadFileProgressReceiver but exercising the non-deprecated
// ProgressListener API: progress reaches fileSize, never exceeds it, and is monotonic.
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener; records every progress callback.
// The progresses list is read directly by downloadFileProgressListener.
private static final class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
@Test
// Minimal rename: expects 201 Created with all-default options.
public void renameMin() {
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
null, null, null), 201);
}
@Test
// Renames the file within the same file system, then verifies the destination client
// returned in the response is usable (200 on getProperties) and the source is gone (404-style error).
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .assertNext(r -> {
            // FIX: assertEquals takes (expected, actual); the original had the arguments reversed,
            // which produces a misleading failure message.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
    // The source path must no longer resolve after the rename.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
@Test
// Renames the file into a different (freshly created) file system, verifies the destination
// client works and the original path no longer exists.
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem
        = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
            null, null, null))
        .assertNext(r -> {
            // FIX: assertEquals takes (expected, actual); the original had the arguments reversed.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> assertInstanceOf(DataLakeStorageException.class, r));
}
@Test
// Renaming a path that was never created must fail with DataLakeStorageException.
public void renameError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
// Renames between source/destination names containing percent-encoded characters and verifies
// both the 201 on rename and that the renamed file's properties can be fetched (200).
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
            null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // BUG FIX: the original built a Mono via flatMap but never subscribed to it, so the
            // 200 assertion never executed (and the flatMap lambda illegally returned null).
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(p -> assertEquals(200, p.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename succeeds (201) when every supplied access condition on the SOURCE is satisfied.
// setupPath*Condition translate the sentinel values into real lease IDs / ETags.
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename fails when any access condition on the SOURCE is violated.
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Rename succeeds (201) when access conditions on the DESTINATION (a pre-created file) are met.
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Rename fails when any access condition on the DESTINATION is violated.
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// A client authenticated with a file-system SAS (read/move/write/create/add/delete) can rename,
// and the destination client returned by rename() works.
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client
        = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient
        = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // FIX: assertEquals takes (expected, actual); the original had the arguments reversed.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
// Same as renameSasToken but the SAS string carries a leading '?', which the builder must tolerate.
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(
        new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client
        = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient
        = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // FIX: assertEquals takes (expected, actual); the original had the arguments reversed.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
// Minimal append smoke test: appending the default payload at offset 0 must not throw.
public void appendDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// append returns 202 Accepted with request-id/version/date headers and server-side encryption set.
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
// Append with a client-computed MD5 of the payload; service validates it and returns 202.
public void appendDataMd5() throws NoSuchAlgorithmException {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    // FIX: String.getBytes() without a charset uses the platform default (pre-JDK18); pin UTF-8
    // so the digest matches the uploaded bytes regardless of platform encoding.
    byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes(StandardCharsets.UTF_8));
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
// Null data or a declared length that disagrees with the actual stream length must fail client-side.
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null body -> NPE; declared size one byte too long/short -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
@Test
// The service rejects an empty append body with DataLakeStorageException.
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
// A null body is rejected client-side with NullPointerException.
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
// Append succeeds (202) when the correct active lease ID is supplied.
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
@Test
// Append with a wrong lease ID while a lease is held fails with 412 Precondition Failed.
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// Guard for @DisabledIf: true when the targeted service version predates 2020-08-04
// (lease actions on append were introduced in that version).
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE during append must leave the file locked with a fixed-duration lease.
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.AUTO_RENEW with a pre-acquired lease keeps the file leased after the append.
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@Test
// LeaseAction.RELEASE (with flush=true) frees the lease as part of the append.
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
// LeaseAction.ACQUIRE_RELEASE: lease is taken for the operation and released afterwards,
// so the file ends unlocked.
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
// Appending to a file that does not exist fails with 404.
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
@Test
// With a fault-injecting pipeline policy, append must retry transparently and the flushed
// content must still round-trip byte-for-byte.
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// append with setFlush(true) commits data in one call: 202 response, then the content is readable.
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
// BinaryData overload smoke test; mirrors appendDataMin.
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
// BinaryData overload of appendWithResponse: 202 plus standard response headers.
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
// BinaryData overload with setFlush(true): single-call append+flush returns 202.
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
// Minimal flush: append then flush-with-overwrite must not throw.
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
@Test
// flush with close=true (retainUncommittedData=false) succeeds on a fresh file.
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
// flush with retainUncommittedData=true (close=false) succeeds on a fresh file.
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
// Flushing at a position (4) that does not match the appended length fails server-side.
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
// HTTP headers supplied at flush time are persisted; a null content type defaults to
// application/octet-stream.
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// flush succeeds (200) when all supplied access conditions are satisfied.
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// flush fails when any access condition is violated.
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // CONSISTENCY FIX: use getDefaultDataSizeLong() like the sibling flush tests
    // (the original used the int-returning getDefaultDataSize()).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
            null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
// Flushing a file that was never created fails with DataLakeStorageException.
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
@Test
// A second flush with overwrite=false on already-flushed data must fail.
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
// Percent-encoded path names are decoded by the client; getFilePath returns the decoded form.
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
@Test
// Bearer-token credentials require HTTPS; building a client against an http endpoint must throw.
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
// Round-trip: upload a random local file (optionally with a custom block size), download it
// back, and compare contents byte-for-byte.
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Sizes from tiny (single-shot) to 101 MB with an explicit 4 MB block size (multi-block path).
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
@Test
// Metadata supplied to uploadFromFile is persisted and the content round-trips intact.
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
// Uploading over an existing file without the overwrite flag must fail for both an existing
// client (fc) and a freshly created file (fac).
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // FIX: the second random file was previously created inline and never registered for
    // cleanup, leaking a temp file on every run.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
@Test
// With overwrite=true, uploading over an existing file succeeds for both clients.
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // FIX: register the second random file for cleanup instead of creating it inline
    // (previously leaked a temp file on every run).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
// Records the latest cumulative byte count from the deprecated ProgressReceiver API
// (upload-from-file reports cumulative totals, so only the last value matters).
private static final class FileUploadReporter implements ProgressReceiver {
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
// ProgressListener counterpart of FileUploadReporter: keeps the latest cumulative byte count.
private static final class FileUploadListener implements ProgressListener {
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
// The deprecated ProgressReceiver must end up reporting exactly the full file size.
// setMaxSingleUploadSizeLong(blockSize - 1) forces the chunked (multi-block) upload path.
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// (file size, block size, concurrency) combinations covering one-block and many-block uploads.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
return Stream.of(
Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
// ProgressListener counterpart of uploadFromFileReporter: final reported total equals file size.
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
// Upload honoring block-size / single-upload-size thresholds; remote size must match input size.
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// 100 bytes over a 50-byte single-upload threshold, with default and explicit 20-byte block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
return Stream.of(
Arguments.of(100, 50L, null),
Arguments.of(100, 50L, 20L)
);
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
// uploadFromFileWithResponse returns 200 with ETag/Last-Modified, and the remote file size
// matches the local input size.
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
            new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
            null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // BUG FIX: the original chain ended at assertNext without a terminal verifyComplete(),
    // so the StepVerifier was never subscribed and the file-size assertion never ran.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
// Buffered upload of a zero-length flux (without overwrite) is rejected by the service.
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
// Empty ByteBuffers interleaved in the source flux are skipped; the downloaded bytes equal the
// concatenation of the non-empty buffers.
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Places the empty buffer at each position (none, middle, end, start) of a three-buffer flux.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
return Stream.of(
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
// Buffered upload across varying (size, buffer size, buffer count); content is verified by
// download only below 100 MB to keep the test's memory footprint bounded.
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// (total size, block size, concurrency) mixes from 10 MB to 100 MB uploads.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
return Stream.of(
Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
);
}
// Asserts that the concatenation of 'expectedBuffers' equals the contents of 'actual'.
// Walks 'actual' with a sliding limit/position window, comparing each expected buffer
// against its matching slice, then checks no trailing bytes remain.
private static void compareListToBuffer(List<ByteBuffer> expectedBuffers, ByteBuffer actual) {
actual.position(0);
for (ByteBuffer expected : expectedBuffers) {
expected.position(0);
// Restrict 'actual' to the window that should mirror this expected buffer.
actual.limit(actual.position() + expected.remaining());
TestUtils.assertByteBuffersEqual(expected, actual);
// Advance past the bytes just compared.
actual.position(actual.position() + expected.remaining());
}
// Every byte of 'actual' must have been consumed by the comparisons above.
assertEquals(0, actual.remaining());
}
// Deprecated ProgressReceiver implementation that counts progress callbacks; used to
// verify that buffered uploads report progress at least once per block.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
// Expected reporting granularity, in bytes.
private final long blockSize;
// Number of progress callbacks observed (read directly by tests).
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
// Progress is expected to arrive in whole-block multiples.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// ProgressListener implementation that counts progress callbacks; the non-deprecated
// counterpart of Reporter above, used by the listener-based upload tests.
private static final class Listener implements ProgressListener {
// Expected reporting granularity, in bytes.
private final long blockSize;
// Number of progress callbacks observed (read directly by tests).
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
// Progress is expected to arrive in whole-block multiples.
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
// Verifies the deprecated ProgressReceiver is invoked at least once per uploaded block
// during a buffered upload.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one report per full block transferred.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Arguments: payload size (bytes), block size (bytes), buffer count. Shared by the
// reporter- and listener-based progress tests.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
return Stream.of(
Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
);
}
// Verifies the (non-deprecated) ProgressListener is invoked at least once per uploaded
// block during a buffered upload.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// At minimum one report per full block transferred.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a Flux made of multiple pre-chunked ByteBuffers (sizes given in MB) and reads
// the file back to verify the chunks were stitched together in order.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
// Note: bufferSize here is expressed in MB, unlike asyncBufferedUpload where it is bytes.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Arguments: chunk sizes in MB, buffer size in MB, number of buffers. Cases cover chunks
// smaller than, equal to, and larger than the block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
return Stream.of(
Arguments.of(Arrays.asList(7, 7), 10L, 2),
Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
Arguments.of(Arrays.asList(10, 10), 10L, 2),
Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
);
}
// Verifies the upload code path chosen for various chunk-size mixes (single-shot vs.
// chunked, around the 4 MB single-upload threshold) still round-trips the data.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing, but feeds the upload a hot (publish/autoConnect)
// Flux to exercise pathing decisions on a non-replayable source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists (bytes) straddling the 4 MB single-upload threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB), Collections.singletonList(4 * Constants.MB));
}
// Hot-Flux pathing test with a policy that injects transient HTTP failures; verifies the
// upload retries through them and the data still round-trips.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
// Writes go through the failure-injecting client; reads use a clean client on the same file.
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists (bytes) for the transient-failure hot-Flux test.
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
// Uploads from an InputStream through a failure-injecting pipeline (sizes below and above
// the 2 MB single-shot threshold) and verifies the read-back matches.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with NullPointerException.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Verifies HTTP headers (cache-control, disposition, encoding, language, MD5, content
// type) set at upload time are reflected by getProperties afterward.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
// MD5 is only sent (and later validated) when the parameter asks for it.
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
// A null content type falls back to the service default "application/octet-stream".
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Arguments: data size, cache-control, disposition, encoding, language, validate MD5, content type.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
return Stream.of(
Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
);
}
// Verifies metadata set at upload time (including the empty-metadata case) is returned
// by getProperties afterward.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies the transfer options drive the expected number of append calls by counting
// them through a subclass that intercepts appendWithResponse.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Hand-rolled spy: counts appends, then delegates to the real implementation.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Verifies an upload with explicit POSIX permissions/umask succeeds and writes the
// expected number of bytes.
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Buffered upload must succeed when all supplied access conditions (lease, ETag match,
// modified-since bounds) are satisfied.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
// setupPath*Condition helpers resolve sentinel values into real lease IDs / ETags.
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Buffered upload must fail with 412 (precondition failed) when any access condition is
// violated.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
// Note: here if-none-match is resolved to the REAL ETag so the condition must fail.
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// With a garbage lease ID the upload must fail; exercises buffer-pool behavior when
// multiple small buffers are in flight at failure time.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// Without the overwrite flag, uploading to an existing file must fail.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
// Second upload of the same path without overwrite should error.
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// With overwrite=true, uploading from a file over an existing path must succeed.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
// Fix: the second random file was previously created inline and never registered for
// cleanup, leaking a temp file after the run; track it like the first one.
File overwriteFile = getRandomFile(50);
overwriteFile.deleteOnExit();
createdFiles.add(overwriteFile);
StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
.verifyComplete();
}
// Uploads from a non-markable (non-replayable) file-backed Flux, downloads to a second
// file, and compares the two on disk.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Uploading from an InputStream without specifying a length must succeed and round-trip.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Supplying a length that does not match the stream (zero, negative, off-by-one either
// way) must fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid lengths: zero, negative, one short of and one past the real size.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
return Stream.of(0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1);
}
// An InputStream upload must survive injected transient failures via retry and still
// round-trip the default payload.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploading a BinaryData payload via uploadWithResponse must round-trip.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploading BinaryData with overwrite=true over an existing file must round-trip.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Requires service version 2021-04-10+: an encryption context set at upload must be
// returned by getProperties.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV document using the serialization's record/column separators — an optional
// 4-column header row plus numCopies copies of two fixed data rows — and writes it to fc
// via create/append/flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
String columnSeparator = Character.toString(s.getColumnSeparator());
String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
+ s.getRecordSeparator();
// Fix: use an explicit charset rather than the platform default so the payload is
// deterministic across environments (matters if separators are non-ASCII).
byte[] headers = header.getBytes(StandardCharsets.UTF_8);
String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
+ s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
+ "600" + s.getRecordSeparator();
byte[] csvData = csv.getBytes(StandardCharsets.UTF_8);
// Header bytes are only included when the serialization says headers are present.
int headerLength = s.isHeadersPresent() ? headers.length : 0;
byte[] data = new byte[headerLength + csvData.length * numCopies];
if (s.isHeadersPresent()) {
System.arraycopy(headers, 0, data, 0, headers.length);
}
for (int i = 0; i < numCopies; i++) {
int o = i * csvData.length + headerLength;
System.arraycopy(csvData, 0, data, o, csvData.length);
}
fc.create(true).block();
fc.append(BinaryData.fromBytes(data), 0).block();
fc.flush(data.length, true).block();
}
// Builds a small JSON-like document containing numCopies "nameN": "ownerN" entries
// (note: a trailing comma remains before the closing brace) and writes it to fc via
// create/append/flush.
private void uploadSmallJson(int numCopies) {
StringBuilder json = new StringBuilder("{\n");
for (int copy = 0; copy < numCopies; copy++) {
json.append(String.format("\t\"name%d\": \"owner%d\",\n", copy, copy));
}
json.append('}');
fc.create(true).block();
fc.append(BinaryData.fromString(json.toString()), 0).block();
// Flush at the character count; the payload is pure ASCII so this equals the byte count.
fc.flush(json.length(), true).block();
}
// Minimal quick-query smoke test: SELECT * over an uploaded CSV must return exactly the
// file's bytes.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
32
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
// Collect the query result by reducing the ByteBuffer stream into one output stream.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Round-trips a CSV query with various record/column separators and header-presence
// combinations on the input and output serializations.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// When headers are parsed on input but not emitted on output, the result is the
// file minus its 16-byte header row.
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Arguments: record separator, column separator, headers present on input, headers
// present on output.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Round-trips a CSV query when the serialization uses a real escape character and field
// quote instead of the '\0' disabled values.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Round-trips JSON input through the query API for various document sizes.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
// Append a trailing LF (byte 10): the query output ends each record with the
// record separator, while the uploaded document does not.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Arguments: number of JSON entries (0 = just "{\n}"), record separator.
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
// Queries CSV input with JSON output and checks the first record is rendered as the
// expected JSON object.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
// Fix: encode the expected bytes with an explicit charset instead of the platform default.
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes(StandardCharsets.UTF_8);
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// Only the leading prefix is compared; the response may carry more records after it.
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
// Queries JSON input with CSV output and checks the flattened CSV result matches.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
// Fix: encode the expected bytes with an explicit charset instead of the platform default.
byte[] expectedData = "owner0,owner1\n".getBytes(StandardCharsets.UTF_8);
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
// Smoke test: querying CSV input with Arrow output (decimal schema) must not throw.
// Output content is not validated here, only that the call succeeds.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// A query referencing a bad column ordinal should surface non-fatal errors through the
// error consumer while the query itself still completes.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
// Upload with '.' separators, then query with ',' — the mismatch yields one column.
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// Declaring JSON input serialization over CSV content is a fatal error: the response
// arrives, but consuming its body must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
// The failure only materializes when the value stream is consumed.
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// Verifies the progress consumer eventually reports the full scanned size of the file.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
// Drain the result stream so progress events are actually produced.
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
/*StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)))
.expectError(IllegalArgumentException.class)
.verify();*/
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
    // Gate for @DisabledIf: true when the targeted service version predates 2020-10-02.
    private static boolean olderThan20201002ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_10_02);
    }
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
private void liveTestScenarioWithRetry(Runnable runnable) {
if (!interceptorManager.isLiveMode()) {
runnable.run();
return;
}
int retry = 0;
while (retry < 5) {
try {
runnable.run();
break;
} catch (Exception ex) {
retry++;
sleepIfRunningAgainstService(5000);
}
}
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
    // Gate for @DisabledIf: true when the targeted service version predates 2019-12-12.
    private static boolean olderThan20191212ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2019_12_12);
    }
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
    // Deliberately unknown FileQuerySerialization implementation, used by
    // queryInputOutputIA to verify that unsupported serialization types are rejected.
    private static final class RandomOtherSerialization implements FileQuerySerialization {
    }
    @Test
    public void uploadInputStreamOverwriteFails() {
        // upload(...) without the overwrite flag must fail because fc already exists
        // (it is created in setup()).
        StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
            .verifyError(IllegalArgumentException.class);
    }
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
    @Test
    public void perCallPolicy() {
        // A per-call policy pins the x-ms-version header; both service calls below
        // must report the pinned 2019-02-02 version in their response headers.
        DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
            .addPolicy(getPerCallVersionPolicy())
            .buildFileAsyncClient();
        assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
            .getValue(X_MS_VERSION));
        assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
            .getValue(X_MS_VERSION));
    }
} | class FileAsyncApiTests extends DataLakeTestBase {
    // File client under test; reassigned to a freshly created file in setup().
    private DataLakeFileAsyncClient fc;
    // Local temp files created during tests; best-effort deleted in cleanup().
    private final List<File> createdFiles = new ArrayList<>();
    // Shared permission fixture: owner rwx, group r-x, other r--.
    private static final PathPermissions PERMISSIONS = new PathPermissions()
        .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
        .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
        .setOther(new RolePermissions().setReadPermission(true));
    // Intentionally null — presumably used to exercise default owner/group handling;
    // usage is outside this view, confirm before changing.
    private static final String GROUP = null;
    private static final String OWNER = null;
    private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
        PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
    @BeforeEach
    public void setup() {
        // Every test starts against a freshly created file.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    }
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
    @Test
    public void createDefaults() {
        // A create with all-default arguments returns 201 and the standard headers.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.createWithResponse(
            null, null, null, null, null))
            .assertNext(r -> {
                assertEquals(201, r.getStatusCode());
                validateBasicHeaders(r.getHeaders());
            })
            .verifyComplete();
    }
    @Test
    public void createError() {
        // An unsatisfiable If-Match condition must make the create fail.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.createWithResponse(
            null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
            .verifyError(DataLakeStorageException.class);
    }
    @Test
    public void createOverwrite() {
        // create(false) must refuse to overwrite an existing file.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.create(false))
            .verifyError(DataLakeStorageException.class);
    }
    @Test
    public void exists() {
        // exists() reports true for a file that was just created.
        fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
        StepVerifier.create(fc.exists())
            .expectNext(true)
            .verifyComplete();
    }
    @Test
    public void doesNotExist() {
        // exists() reports false for a path that was never created.
        fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fc.exists())
            .expectNext(false)
            .verifyComplete();
    }
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createWithResponse(null, null, headers, null, null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType);
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
    // Gate for @DisabledIf: true when the targeted service version predates 2021-04-10.
    private static boolean olderThan20210410ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2021_04_10);
    }
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
    @Test
    public void createPermissionsAndUmask() {
        // Octal permissions "0777" with umask "0057" must be accepted (201 Created).
        assertAsyncResponseStatusCode(fc.createWithResponse(
            "0777", "0057", null, null, null), 201);
    }
    // Gate for @DisabledIf: true when the targeted service version predates 2020-12-06.
    private static boolean olderThan20201206ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2020_12_06);
    }
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
fc.createWithResponse(null, null);
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // 0777 permissions filtered through a 0057 umask should yield rwx-w----.
    fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057"), null)
        .block();
    StepVerifier.create(fc.getAccessControlWithResponse(true, null, null))
        .assertNext(response -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
            response.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // A proposed lease id is accepted when paired with a lease duration.
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setProposedLeaseId(testResourceNamer.randomUuid())
        .setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Proposing a lease id without a lease duration must be rejected.
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setProposedLeaseId(CoreUtils.randomUuid().toString());
    StepVerifier.create(fc.createIfNotExistsWithResponse(createOptions, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String proposedId = CoreUtils.randomUuid().toString();
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(
        new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(proposedId), null), 201);
    // The created file should report a locked, fixed-duration lease.
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> {
            assertEquals(LeaseStatusType.LOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.LEASED, properties.getLeaseState());
            assertEquals(LeaseDurationType.FIXED, properties.getLeaseDuration());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Creation succeeds (201) for every supplied scheduled-deletion configuration.
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(
        new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions), null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // Schedule deletion six days after creation and verify the resulting expiry time.
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(new DataLakePathScheduleDeletionOptions(Duration.ofDays(6)));
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, null), 201);
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> compareDatesWithPrecision(properties.getExpiresOn(),
            properties.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void deleteMin() {
    // Bare delete of an existing file returns 200.
    assertAsyncResponseStatusCode(fc.deleteWithResponse(null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
    fc.deleteWithResponse(null, null, null).block();
    // After deletion, property retrieval must fail with 404/BlobNotFound.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(error -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(error, 404,
            BlobErrorCode.BLOB_NOT_FOUND));
}
// Delete succeeds (200) when all supplied access conditions (lease, ETag match,
// modified-since dates) are satisfied; the setup helpers acquire a real lease/ETag first.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
// Delete must fail when any access condition is unmet; note the lease is set up but the
// raw (mismatching) leaseID is passed, and ifNoneMatch is set to the real ETag.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
    // Existing file: deleteIfExists emits true.
    StepVerifier.create(fc.deleteIfExists()).expectNext(true).verifyComplete();
}
@Test
public void deleteIfExistsMin() {
    // Existing file: deletion reports 200.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    // A follow-up properties call proves the file is really gone.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // First call deletes (200); second finds nothing (404) but does not error.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
// deleteIfExists succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
// deleteIfExists must fail when any access condition is unmet (mismatched lease/ETag/dates).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // A successful setPermissions call returns refreshed path info (ETag + last-modified).
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
    // The withResponse overload surfaces the raw 200 status.
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null), 200);
}
// setPermissions succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
// setPermissions must fail when any access condition is unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
    // Target a path that was never created; the service must reject the call.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // Setting the ACL returns refreshed path info (ETag + last-modified).
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setACLWithResponse() {
    // The withResponse overload surfaces the raw 200 status.
    assertAsyncResponseStatusCode(
        fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
// setAccessControlList succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
// setAccessControlList must fail when any access condition is unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
    // Setting an ACL on a nonexistent path fails.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// True when the targeted service version predates 2020-02-10; gates the recursive-ACL tests.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
    // Recursive set over a single file: exactly one file changed, no directories, no failures.
    StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(result -> {
            assertEquals(0L, result.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, result.getCounters().getChangedFilesCount());
            assertEquals(0L, result.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
    // Recursive update over a single file: exactly one file changed, no directories, no failures.
    StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
        .assertNext(result -> {
            assertEquals(0L, result.getCounters().getChangedDirectoriesCount());
            assertEquals(1L, result.getCounters().getChangedFilesCount());
            assertEquals(0L, result.getCounters().getFailedChangesCount());
        })
        .verifyComplete();
}
// Recursively removes a set of ACL entries (parsed from the canonical short form) from a
// single file; expects one changed file and no failures.
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
    // All four access-control facets should be populated for an existing file.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(accessControl -> {
            assertNotNull(accessControl.getAccessControlList());
            assertNotNull(accessControl.getPermissions());
            assertNotNull(accessControl.getOwner());
            assertNotNull(accessControl.getGroup());
        })
        .verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
    // userPrincipalNameReturned = false; plain fetch succeeds with 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
    // userPrincipalNameReturned = true also succeeds with 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(true, null, null), 200);
}
// getAccessControl succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
// getAccessControl must fail for unmet access conditions. The garbage-lease row is skipped:
// presumably that case is not applicable to this read operation — verify against suite history.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustive check of the default property set for a freshly created file with no
// metadata, custom headers, lease, or copy history.
@Test
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
// No custom content headers were ever set, so these are all absent.
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
// No copy has ever targeted this path, so all copy-related fields are unset.
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
@Test
public void getPropertiesMin() {
    // Minimal properties fetch succeeds with 200.
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
// getProperties must fail when any access condition is unmet.
// NOTE(review): unlike the sibling *ACFail tests, this one passes setupPathLeaseCondition's
// return value to setLeaseId rather than the raw leaseID — confirm this is intentional.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // A nonexistent path surfaces a storage exception whose message mentions BlobNotFound.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException =
                assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageException.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Null headers are accepted; the response still carries the standard validation headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
// Re-applies the file's current headers while changing only the content type, then
// verifies the new content type sticks.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
// NOTE(review): block() can return null for an empty Mono; assumed non-null because the
// file exists at this point — confirm the fixture always creates it.
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // BUGFIX: append()/flush() return cold Monos; without block() the subscription never
    // happened and the file content was never uploaded before the headers were validated.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    // Every header supplied (or its service default when null) must be reflected by getProperties.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    // One all-null row and one fully populated row (with a valid MD5 of the default payload).
    byte[] defaultPayloadMd5 =
        Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", defaultPayloadMd5, "type"));
}
// setHttpHeaders succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
// setHttpHeaders must fail when any access condition is unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
    // Setting headers on a nonexistent path fails.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    Map<String, String> expected = Collections.singletonMap("foo", "bar");
    fc.setMetadata(expected).block();
    // The metadata map must round-trip exactly.
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(expected, properties.getMetadata()))
        .verifyComplete();
}
// Round-trips caller-supplied metadata pairs through setMetadataWithResponse/getProperties;
// only fully specified key/value pairs are sent.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// setMetadata succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
// setMetadata must fail when any access condition is unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
    // Setting metadata on a nonexistent path fails.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Reads the whole file with all-default parameters and exhaustively checks the response
// headers of a plain, unleased, never-copied file, then verifies the downloaded bytes.
@Test
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false)
.flatMap(r -> {
HttpHeaders headers = r.getHeaders();
// No metadata was ever set, so no x-ms-meta-* headers should be present.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
// No copy ever targeted this path, so all copy headers are absent.
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS))
;
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
.verifyComplete();
}
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    // A zero-length file reads back as an empty buffer.
    StepVerifier.create(fc.read())
        .assertNext(buffer -> assertEquals(0, buffer.array().length))
        .verifyComplete();
}
// Downloads range [2, 2+5) through a mock policy that asserts every retry re-requests
// exactly "bytes=2-6"; the injected failures ultimately surface as an IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
.flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
.verifyError(IOException.class);
}
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // A full read must round-trip the uploaded bytes.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    // A null count means "read from offset to end of file".
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Cleanup: removed an unused ByteArrayOutputStream local that was never written to.
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
            .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
private static Stream<Arguments> readRangeSupplier() {
    // offset, count (null = read to end), expected slice of the default payload.
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 3 + 2)));
}
// read succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read must fail when any access condition is unmet.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requests MD5 validation for a ranged read and verifies the Content-MD5 header matches the
// Base64-encoded MD5 of the requested 3-byte slice.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
// Re-wrap: lambdas cannot propagate the checked exception.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Client whose pipeline injects 5 transient failures; the default retry behavior
    // should absorb them and still deliver the full payload.
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    // Cleanup: removed an unused ByteArrayOutputStream local that was never written to.
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(downloaded -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), downloaded))
        .verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // BUGFIX: append()/flush() return cold Monos and were never subscribed; without block()
    // the remote file had no content before the download attempt.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    // Destination already exists and overwrite was not requested -> FileAlreadyExistsException.
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(error -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, error);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// BUGFIX: the method carried a duplicated @Test annotation, which does not compile —
// JUnit's @Test is not a repeatable annotation.
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    // The downloaded file must contain exactly the uploaded payload.
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// Downloads into a pre-existing destination using explicit open options (CREATE without
// TRUNCATE/CREATE_NEW), then verifies the file contents.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
// Re-wrap: lambdas cannot propagate the checked exception.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Downloads over a pre-existing destination using CREATE + TRUNCATE_EXISTING open options,
// then verifies the file contents were fully replaced.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
// Re-wrap: lambdas cannot propagate the checked exception.
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// File sizes for the download tests: tiny, block-aligned large, deliberately
// non-aligned large (note 1026, not 1024), and multi-block.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
// Same round-trip as downloadFile, but through a freshly built async service client and a
// file system created inline, exercising the async buffer-copy path.
// NOTE(review): the @EnabledIf annotation below appears truncated (unterminated string) — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient();
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloads only the requested byte range and verifies the slice matches the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges for downloadFileRange: full range, offset start, small interior slice,
// all-but-last-byte, and a count extending past the end of the data.
private static Stream<FileRange> downloadFileRangeSupplier() {
    long full = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, full),
        new FileRange(1, full - 1),
        new FileRange(3, 2L),
        new FileRange(0, full - 1),
        new FileRange(0, 10 * 1024L));
}
// A range whose offset lies beyond the end of the blob must fail with a storage error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A FileRange with no count should download from the offset through the end of the file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// Download succeeds when every supplied access condition (modified dates, ETag match, lease)
// matches the path's current state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
// setupPathMatchCondition/setupPathLeaseCondition translate placeholder values into real
// ETag / lease IDs fetched from the service.
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// Download must fail with ConditionNotMet or a lease mismatch when an access condition
// does not match the path's state.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// Verifies ETag locking during a chunked download: a pipeline policy overwrites the blob
// after the first response, so subsequent chunk requests must fail with 412 and the partial
// local file must be cleaned up.
// NOTE(review): the @EnabledIf annotation below appears truncated (unterminated string) — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
// Separate client used to overwrite the blob mid-download.
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the first chunk response, replace the blob's contents so its ETag changes.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple chunk requests.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
// Concurrent chunk failures may be dropped by Reactor; silence the global hook for this test.
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
// Give the async cleanup a moment, then confirm the partial download was deleted.
sleepIfRunningAgainstService(500);
assertFalse(outFile.exists());
}
// The deprecated ProgressReceiver must report monotonically increasing progress that tops
// out at exactly the file size.
// NOTE(review): the @EnabledIf annotation below appears truncated (unterminated string) — confirm.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
// Progress must reach the full size, never exceed it, and never decrease.
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for the deprecated ProgressReceiver: records every reported byte count in order.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Ordered record of cumulative bytes reported; read directly by the tests.
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// Same monotonic-progress checks as downloadFileProgressReceiver, but for the non-deprecated
// ProgressListener API.
// NOTE(review): the @EnabledIf annotation below appears truncated (unterminated string) — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Test double for ProgressListener: records every reported progress value in order.
private static final class MockProgressListener implements ProgressListener {
// Ordered record of cumulative progress values; read directly by the tests.
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
// A rename within the same file system, with no conditions, returns 201 Created.
@Test
public void renameMin() {
    String destination = generatePathName();
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, destination, null, null, null), 201);
}
// After a rename the returned client must resolve the new path (200) and the old path must
// no longer exist.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null)
.flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
// The original path should now 404.
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Rename across file systems: the renamed path must resolve in the destination file system
// (200) and the original path must no longer exist.
@Test
public void renameFilesystemWithResponse() {
    // blockOptional + orElseThrow instead of a bare block(): Mono.block() may return null,
    // which would NPE on getFileSystemName() below with a confusing stack trace.
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient
        .createFileSystem(generateFileSystemName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected file system to be created."));
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        // JUnit's assertEquals takes the expected value first; the original had the
        // arguments swapped, which yields misleading failure messages.
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();
    // The source path should now 404.
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a path that was never created must surface a storage error.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String destination = generatePathName();
    StepVerifier.create(fc.renameWithResponse(null, destination, null, null, null))
        .expectError(DataLakeStorageException.class)
        .verify();
}
// Rename must handle percent-encoded characters in both source and destination names.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
fc.create().block();
StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
.flatMap(r -> {
assertEquals(201, r.getStatusCode());
return r.getValue().getPropertiesWithResponse(null);
}))
.assertNext(piece -> assertEquals(200, piece.getStatusCode()))
.verifyComplete();
}
// Rename succeeds when the source-side access conditions match the source path's state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename must fail when a source-side access condition does not match the source path.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds when the destination-side access conditions match the (pre-created)
// destination path's state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename must fail when a destination-side access condition does not match the destination.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// A client authenticated with a file-system SAS carrying move permission must be able to
// rename, and the returned destination client must resolve the new path.
@Test
public void renameSasToken() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // blockOptional + orElseThrow instead of a bare block(): a null result would otherwise
    // NPE on the StepVerifier line with an unhelpful stack trace.
    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected rename to return a destination client."));
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // JUnit's assertEquals takes the expected value first; the original had the
        // arguments swapped.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?', which the client must
// tolerate when parsing.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    // blockOptional + orElseThrow instead of a bare block() to fail fast with a clear message
    // rather than NPE on the next line.
    DataLakeFileAsyncClient destClient = client
        .rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName())
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected rename to return a destination client."));
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // JUnit's assertEquals takes the expected value first; the original had the
        // arguments swapped.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
// A bare append at offset 0 should complete without throwing.
@Test
public void appendDataMin() {
    Mono<Void> appendMono = fc.append(DATA.getDefaultBinaryData(), 0);
    assertDoesNotThrow(appendMono::block);
}
// Append returns 202 Accepted with the standard request/version/date headers and the
// server-side encryption flag set.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Append with a correct Content-MD5 of the payload is accepted (202).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Null data or a declared length that disagrees with the actual stream length must fail
// with the corresponding client-side exception.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Cases: null body -> NPE; declared size one too large / one too small -> UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
// Appending a zero-byte body is rejected by the service.
@Test
public void appendDataEmptyBody() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    BinaryData empty = BinaryData.fromBytes(new byte[0]);
    StepVerifier.create(fc.append(empty, 0))
        .expectError(DataLakeStorageException.class)
        .verify();
}
// A null Flux body is rejected client-side with NullPointerException.
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
// Append with the path's actual lease ID is accepted (202).
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Append with the wrong lease ID while the path is leased must fail with 412.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard: lease-action support on append requires service version 2020-08-04+.
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE on append should take out a fixed-duration lease as a side effect.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
// The path should now report a locked, fixed-duration lease.
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW on append should renew an existing lease, leaving it locked.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.RELEASE (with flush=true) on append should release the held lease.
@Test
public void appendDataLeaseRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.RELEASE)
.setLeaseId(leaseId)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
// After release the lease must be unlocked and available again.
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE should take a lease for the duration of the operation and
// release it once the flushed append completes.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
// Appending to a path that was never created must fail with 404.
@Test
public void appendDataError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(404, e.getResponse().getStatusCode());
});
}
// A transient failure injected into the pipeline must be retried transparently; the data
// read back after flush must still match what was appended.
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Append with flush=true should commit the data in one call: 202 on the append and the full
// payload readable immediately afterwards, with no separate flush.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Minimal append using the BinaryData overload.
// NOTE(review): body is identical to appendDataMin — presumably this was meant to exercise a
// different overload; confirm against the sync-client counterpart of this test.
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// BinaryData append returns 202 with the standard headers and the encryption flag set.
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// BinaryData append with flush=true returns 202 with the standard headers.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Flushing appended data with overwrite=true should complete without throwing.
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
// Flush with close=true (retainUncommittedData=false) should succeed.
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
// Flush with retainUncommittedData=true (close=false) should succeed.
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
// Flushing with a position that does not match the appended length must fail.
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
// 4 is deliberately not the length of the appended data.
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// HTTP headers supplied at flush time must be visible on subsequent getProperties calls;
// a null content type defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
contentType = (contentType == null) ? "application/octet-stream" : contentType;
// Effectively-final copy required for use inside the lambda below.
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
// Flush succeeds (200) when all supplied access conditions match the path's state.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Flush must fail when an access condition does not match the path's state.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Use the long-valued data size for consistency with flushAC and the other flush tests,
    // instead of the implicit int -> long widening of getDefaultDataSize().
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
// Flushing a path that was never created must fail with a storage error.
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
// A second flush with overwrite=false over already-committed data must fail.
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
// Cannot flush to a location that already has data when overwrite is disabled.
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// The client must URL-decode percent-encoded path names; already-decoded names pass through.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
// Bearer-token credentials require HTTPS; building a client against an http endpoint must
// be rejected with IllegalArgumentException.
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
// Round-trips files of several sizes through uploadFromFile + readToFile and compares bytes.
// NOTE(review): the @EnabledIf annotation below appears truncated (unterminated string) — confirm.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// (fileSize, blockSize) pairs; a null block size exercises the client's default chunking.
private static Stream<Arguments> uploadFromFileSupplier() {
    long fourMb = 4L * 1024 * 1024;
    return Stream.of(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, fourMb));
}
// Metadata supplied to uploadFromFile must be stored with the path, and the uploaded bytes
// must read back unchanged.
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// By default, uploadFromFile must refuse to overwrite an existing remote file.
@Test
public void uploadFromFileDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Register the second temp file for cleanup as well; the original passed getRandomFile(50)
    // inline and leaked it (never added to createdFiles or marked deleteOnExit).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
// Verifies that uploadFromFile with overwrite=true succeeds against an existing path,
// for both fc and a freshly created file client.
@Test
public void uploadFromFileOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Fix: register the second temp file for cleanup (it was previously created inline and
    // leaked), matching how every other test in this file tracks temp files.
    File file2 = getRandomFile(50);
    file2.deleteOnExit();
    createdFiles.add(file2);
    StepVerifier.create(fac.uploadFromFile(file2.toPath().toString(), true))
        .verifyComplete();
}
/*
 * Reports the number of bytes sent when uploading a file. This is different from other reporters
 * which track the number of reports, as upload-from-file hooks into the loading-data-from-disk
 * data stream which is a hard-coded read size; this reporter simply remembers the latest total.
 */
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Latest cumulative byte count received from the transfer.
    private long bytesReported;

    @Override
    public void reportProgress(long bytesTransferred) {
        bytesReported = bytesTransferred;
    }

    long getReportedByteCount() {
        return bytesReported;
    }
}
// ProgressListener counterpart of FileUploadReporter: remembers the most recent cumulative
// byte count delivered by the transfer.
private static final class FileUploadListener implements ProgressListener {
    private long bytesReported;

    @Override
    public void handleProgress(long bytesTransferred) {
        bytesReported = bytesTransferred;
    }

    long getReportedByteCount() {
        return bytesReported;
    }
}
// Verifies the deprecated ProgressReceiver path: after uploadFromFile completes, the reporter's
// final byte count must equal the file size, across several block-size/concurrency combinations.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// Single-upload threshold is set below the block size to force the chunked upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
// Argument sets for the progress tests: (total size, block size, concurrency).
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),   // single block
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),  // many small blocks
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100));
}
// ProgressListener analogue of uploadFromFileReporter: the listener's final byte count must
// equal the uploaded file size.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
// Single-upload threshold below the block size forces the chunked upload path.
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
// Uploads a file with explicit single-upload/block-size options and verifies the resulting
// remote file size matches the input size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// (dataSize, singleUploadSize, blockSize): both rows exceed the single-upload threshold,
// with and without an explicit block size.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Stream.of(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L));
}
// Same as uploadFromFileOptions, but through the WithResponse overload: checks status code,
// ETag/lastModified presence, and the resulting remote file size.
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
// Uploading an empty buffer without overwrite against a path that was never created is expected
// to surface a DataLakeStorageException.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
// Uploads three buffers (some possibly empty) in order and verifies the downloaded bytes equal
// the expected concatenation — empty buffers must be skipped without corrupting the stream.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
// Each row: three source buffers (an empty buffer placed at a different position) plus the bytes
// expected after uploading them in order. Fresh wraps per row keep buffer positions independent.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] spaceBytes = " ".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(spaceBytes), emptyBuffer,
            "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes),
            "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(spaceBytes), ByteBuffer.wrap(worldBytes),
            " world!".getBytes(StandardCharsets.UTF_8)));
}
// Buffered upload of random data through a write-tuned service client, then (for payloads under
// 100 MB) downloads and compares the content byte-for-byte.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Content verification is skipped at >= 100 MB — presumably to bound test time/memory; the
// upload itself is still exercised above. TODO confirm intent.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
// (dataSize, bufferSize, numBuffs) combinations covering small/large payloads and concurrency.
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3));
}
// Asserts that the concatenation of all buffers equals exactly the contents of result, by
// sliding result's limit/position window across each expected buffer in turn.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer expected : buffers) {
        expected.position(0);
        // Window result to the next expected.remaining() bytes and compare.
        result.limit(result.position() + expected.remaining());
        TestUtils.assertByteBuffersEqual(expected, result);
        // remaining() is re-read after the comparison on purpose, in case the comparison
        // advanced the buffer positions.
        result.position(result.position() + expected.remaining());
    }
    // Nothing may be left over in result.
    assertEquals(0, result.remaining());
}
// Deprecated ProgressReceiver that counts progress callbacks and asserts each reported total is
// a whole multiple of the configured block size.
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
    private final long expectedBlockSize;
    // Read directly by tests, so the field name is part of the class's interface.
    private long reportingCount;

    Reporter(long blockSize) {
        this.expectedBlockSize = blockSize;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        assert bytesTransferred % expectedBlockSize == 0;
        reportingCount++;
    }
}
// ProgressListener counterpart of Reporter: counts callbacks and asserts each reported total is
// a whole multiple of the configured block size.
private static final class Listener implements ProgressListener {
    private final long expectedBlockSize;
    // Read directly by tests, so the field name is part of the class's interface.
    private long reportingCount;

    Listener(long blockSize) {
        this.expectedBlockSize = blockSize;
    }

    @Override
    public void handleProgress(long bytesTransferred) {
        assert bytesTransferred % expectedBlockSize == 0;
        reportingCount++;
    }
}
// Buffered upload with the deprecated ProgressReceiver: expects at least one progress report per
// full block (size / blockSize).
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// Lower bound only: retries/partial reports may produce extra callbacks.
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// (size, blockSize, bufferCount) combinations for the buffered-upload progress tests.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20));
}
// ProgressListener analogue of bufferedUploadWithReporter: at least one callback per full block.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
// Lower bound only: retries/partial reports may produce extra callbacks.
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
// Uploads a Flux of multiple variably-sized MB buffers (a "chunked" source whose chunk sizes do
// not align with the transfer block size), then downloads and verifies the concatenation.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
// dataSizeList entries are in MB.
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// (chunk sizes in MB, bufferSize in MB, numBuffers): chunks below, equal to, and above the
// 10 MB block size.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Stream.of(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2));
}
// Exercises the single-shot vs. chunked code-path selection: payloads straddling the 4 MB
// single-upload threshold must round-trip intact.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Same as bufferedUploadHandlePathing, but the source is a hot Flux (publish().autoConnect()),
// i.e. not replayable — the upload must still pick the right path and round-trip the data.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists straddling the 4 MB single-upload threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    int fourMb = 4 * Constants.MB;
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(fourMb + 1, 10),
        Arrays.asList(fourMb, fourMb),
        Collections.singletonList(fourMb));
}
// Hot-Flux upload through a pipeline that injects transient failures: retries must not lose or
// duplicate data, verified by downloading through a clean client.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
// Reads go through a separate, non-failing client against the same URL.
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
// Chunk-size lists for the transient-failure variant (no singleton-4MB case).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    int fourMb = 4 * Constants.MB;
    return Stream.of(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(fourMb + 1, 10),
        Arrays.asList(fourMb, fourMb));
}
// InputStream-based upload through a failure-injecting pipeline; sizes below and above the 2 MB
// single-upload threshold exercise both code paths. Content is verified via a clean read.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
// A null data Flux must be rejected with NullPointerException before any network call.
@Test
public void bufferedUploadIllegalArgumentsNull() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Cannot create file."));
StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
.verifyError(NullPointerException.class);
}
// Uploads with explicit HTTP headers (optionally including an MD5 of the payload) and verifies
// the stored path properties echo them back; a null content type defaults to
// "application/octet-stream".
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
// Header combinations for small (single-shot) and 6 MB (chunked) uploads, with and without
// MD5 validation.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Stream.of(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type"));
}
// Uploads with an optional metadata map (empty when both keys are null) and verifies the stored
// metadata matches exactly.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
// Verifies how single-upload/block-size options translate into the number of append calls, by
// wrapping the client in an anonymous subclass that counts appendWithResponse invocations.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
// Spy client: counts each append, then delegates to the real implementation.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
// Uploads with POSIX permissions "0777" and umask "0057" set on the options; asserts the upload
// succeeds and the file size is correct (permission effects themselves are not asserted here).
@Test
public void bufferedUploadPermissionsAndUmask() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(10, response.getValue().getFileSize());
})
.verifyComplete();
}
// Upload must succeed when all access conditions (lease, ETag match/none-match,
// modified/unmodified since) are satisfied. setupPath* helpers resolve the real lease id/ETag
// from the service for the sentinel values supplied by the parameter source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
// Upload must fail with HTTP 412 (precondition failed) when an access condition is violated.
// Note the inversion vs. bufferedUploadAC: ifMatch is the raw (stale) value while ifNoneMatch is
// resolved to the current ETag, guaranteeing a condition failure.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
// Uploading with a garbage lease id must fail; exercised with small block sizes and multiple
// buffers so the buffer pool is engaged when the failure surfaces.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
// A second buffered upload without the overwrite flag must fail; the client raises
// IllegalArgumentException rather than a service error for this overload.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
// Verifies upload with overwrite=true succeeds against existing content, for both fc and a
// fresh file client.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // Fix: the Mono returned by uploadFromFile was never subscribed, so the assertDoesNotThrow
    // check was vacuous. block() forces the upload to execute, matching the sibling
    // uploadFromFileOverwrite test.
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Fix: register the second temp file for cleanup instead of creating it inline and leaking it.
    File file2 = getRandomFile(50);
    file2.deleteOnExit();
    createdFiles.add(file2);
    StepVerifier.create(fac.uploadFromFile(file2.toPath().toString(), true))
        .verifyComplete();
}
// Uploads from a non-markable (non-replayable) file-channel stream and verifies the content
// round-trips via readToFile.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
File file = getRandomFile(10);
file.deleteOnExit();
createdFiles.add(file);
File outFile = getRandomFile(10);
outFile.deleteOnExit();
createdFiles.add(outFile);
Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
fc.upload(stream, null, true).block();
fc.readToFile(outFile.toPath().toString(), true).block();
compareFiles(file, outFile, 0, file.length());
}
// Uploading from an InputStream without a declared length must still succeed and round-trip
// the default test payload.
@Test
public void uploadInputStreamNoLength() {
assertDoesNotThrow(() ->
fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// A declared length that disagrees with the stream's actual length (zero, negative, short, or
// long by one) must cause the upload to fail.
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
assertThrows(Exception.class, () -> fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid declared lengths: zero, negative, and off-by-one in both directions.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
// Upload through a pipeline that injects transient failures must still succeed via retries, and
// the content must round-trip when read through a clean client.
@Test
public void uploadSuccessfulRetry() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploads the default payload as BinaryData via uploadWithResponse and verifies the round-trip.
@Test
public void uploadBinaryData() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(
() -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploads BinaryData with overwrite=true via the simple upload overload and verifies the
// round-trip.
@Test
public void uploadBinaryDataOverwrite() {
DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
// Uploads with an encryption context (requires service version >= 2021-04-10) and verifies the
// context is echoed back on getProperties.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
String encryptionContext = "encryptionContext";
FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
.setEncryptionContext(encryptionContext);
fc.uploadWithResponse(options).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
/* Quick Query Tests. */
// Builds a CSV payload (optionally prefixed with a header row) containing numCopies copies of two
// fixed records, using the serialization's record/column separators, then creates/overwrites fc
// with it via append + flush.
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    // Fix: encode with an explicit charset. The no-arg getBytes() uses the platform default
    // charset, making the test data platform-dependent; UTF-8 matches the rest of this file.
    byte[] headers = header.getBytes(StandardCharsets.UTF_8);
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes(StandardCharsets.UTF_8);
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Tile the two-record CSV body numCopies times after the (optional) header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
// Uploads a small JSON object with numCopies "nameN": "ownerN" entries to fc
// (create + append + flush).
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int i = 0; i < numCopies; i++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    // Content is ASCII, so the char count equals the byte count for the flush length.
    fc.flush(json.length(), true).block();
}
// Quick Query "SELECT *" over CSV of increasing sizes must return exactly the same bytes as a
// plain read of the file.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
1,
32,
256,
400,
4000
})
public void queryMin(int numCopies) {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(ser, numCopies);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
// NOTE(review): piece.array() writes the full backing array, ignoring position/limit —
// this assumes query() emits buffers whose arrays exactly hold their content. TODO confirm.
ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
try {
outputStream.write(piece.array());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
return outputStream;
}).block();
byte[] queryArray = queryData.toByteArray();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Quick Query with matching input/output delimited serializations across separator and header
// combinations. When the input has headers but the output does not, the 16-byte header row is
// stripped from the query result.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn);
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
// Arguments: recordSeparator, columnSeparator, headersPresentIn, headersPresentOut.
// Covers whitespace, control, and symbol separators plus the header in/out matrix.
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
return Stream.of(
Arguments.of('\n', ',', false, false),
Arguments.of('\n', ',', true, true),
Arguments.of('\n', ',', true, false),
Arguments.of('\t', ',', false, false),
Arguments.of('\r', ',', false, false),
Arguments.of('<', ',', false, false),
Arguments.of('>', ',', false, false),
Arguments.of('&', ',', false, false),
Arguments.of('\\', ',', false, false),
Arguments.of(',', '.', false, false),
Arguments.of(',', ';', false, false),
Arguments.of('\n', '\t', false, false),
Arguments.of('\n', '<', false, false),
Arguments.of('\n', '>', false, false),
Arguments.of('\n', '&', false, false),
Arguments.of('\n', '\\', false, false)
);
}
// Verifies that setting a non-NUL escape character and field quote still round-trips the
// uploaded CSV content byte-for-byte through query().
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\\') /* Escape set here. */
.setFieldQuote('"') /* Field quote set here*/
.setHeadersPresent(false);
uploadCsv(ser, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Verifies query() round-trips JSON input of varying sizes (numCopies small JSON records).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
// Append a trailing '\n' (byte 10): the query output terminates the last record with the
// record separator while the uploaded fixture does not.
readData.write(10);
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
// Arguments: numCopies of the small JSON fixture, record separator. Always '\n'.
private static Stream<Arguments> queryInputJsonSupplier() {
return Stream.of(
Arguments.of(0, '\n'),
Arguments.of(10, '\n'),
Arguments.of(100, '\n'),
Arguments.of(1000, '\n')
);
}
// Verifies converting CSV input into JSON output: a single CSV row must be emitted as a JSON
// object with positional field names _1.._4.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// Only the leading expectedData.length bytes are compared; trailing separator bytes are ignored.
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
// Verifies converting JSON input into CSV output: two small JSON records become one
// comma-separated CSV line.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
// Smoke test for Arrow output serialization: only asserts the query request succeeds,
// not the Arrow-encoded payload itself.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// Uploads '.'-separated rows, then queries them as ','-separated: every row parses as a
// single column, so referencing "_2" yields non-fatal InvalidColumnOrdinal errors per record.
// Asserts the error consumer observed at least one such error while the query itself completes.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
    FileQueryDelimitedSerialization serialization = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(serialization.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver errorReceiver = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(serialization.setColumnSeparator(','))
            .setOutputSerialization(serialization.setColumnSeparator(','))
            .setErrorConsumer(errorReceiver);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block().getValue().blockLast());
        assertTrue(errorReceiver.numErrors > 0);
    });
}
// Uploads delimited data but declares the input as JSON: the resulting parse failure is fatal,
// so consuming the response body must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
// The request itself succeeds; the failure surfaces only when the value Flux is consumed.
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// Verifies the progress consumer eventually reports that the full file size was scanned.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization serialization = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(serialization.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        // Nested class of this test class, so the simple name resolves.
        MockProgressReceiver progressReceiver = new MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setProgressConsumer(progressReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(progressReceiver.progressList.contains(sizeofBlobToRead));
    });
}
// Queries a large (512000-record) CSV and asserts the reported bytes-scanned values are
// monotonically non-decreasing across progress callbacks.
@DisabledIf("olderThan20191212ServiceVersion")
// NOTE(review): the condition string below is an unterminated literal — it appears truncated
// in this copy (likely "...DataLakeTestBase#<condition>"); restore from the original source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
// An unrecognized FileQuerySerialization subtype must be rejected client-side with
// IllegalArgumentException, whether supplied as input or output serialization.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
// Arrow is an output-only serialization; using it as input must fail with IllegalArgumentException.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
.verifyError(IllegalArgumentException.class);
});
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Parquet is an input-only serialization; using it as output must fail with IllegalArgumentException.
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
.verifyError(IllegalArgumentException.class);
});
}
// Querying a path that was never created must surface DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
// Re-point fc at a non-existent file (no create call).
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
// Query must succeed when every supplied access condition (lease, ETag match/none-match,
// modified-since window) is satisfied; parameters from modifiedMatchAndLeaseIdSupplier.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs the given test scenario directly in playback/record mode; in live mode retries it up
 * to five times, sleeping 5 seconds between attempts, to absorb transient service flakiness.
 * <p>
 * Fix: previously the exception from the final attempt was swallowed, so a scenario that
 * failed on all five attempts would silently pass in live mode. The last failure is now
 * rethrown. (AssertionError was never caught and still propagates immediately.)
 *
 * @param runnable the scenario to execute; any {@link RuntimeException} it throws triggers a retry
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    Exception lastFailure = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (Exception ex) {
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    // All attempts failed: surface the last failure instead of passing vacuously.
    throw (lastFailure instanceof RuntimeException)
        ? (RuntimeException) lastFailure
        : new RuntimeException("Live test scenario failed after 5 attempts", lastFailure);
}
// Query must fail with DataLakeStorageException when any access condition is violated;
// parameters from invalidModifiedMatchAndLeaseIdSupplier.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
// NOTE(review): unlike queryAC this is not wrapped in liveTestScenarioWithRetry — confirm intentional.
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
// Verifies scheduleDeletionWithResponse sets (or clears) the file's expiry according to the
// supplied options; hasExpiry is the expected presence of an expiresOn property afterwards.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Arguments: deletion options, whether an expiry is expected. Empty options and null mean no expiry.
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// Verifies an absolute expiry time is honored; the service truncates it to whole seconds.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
OffsetDateTime now = testResourceNamer.now();
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fileAsyncClient.create().block();
fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Scheduling deletion on a file that was never created must fail with DataLakeStorageException.
@Test
public void scheduleDeletionError() {
FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
.verifyError(DataLakeStorageException.class);
}
// Test double that records every bytes-scanned value reported by query progress callbacks,
// in arrival order. Tests read progressList directly after the query completes.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
List<Long> progressList = new ArrayList<>();
@Override
public void accept(FileQueryProgress progress) {
progressList.add(progress.getBytesScanned());
}
}
// Test double that asserts every reported query error is non-fatal and of the expected type,
// and counts how many were delivered. Tests read numErrors after the query completes.
static class MockErrorReceiver implements Consumer<FileQueryError> {
String expectedType;
int numErrors;
MockErrorReceiver(String expectedType) {
this.expectedType = expectedType;
this.numErrors = 0;
}
@Override
public void accept(FileQueryError error) {
assertFalse(error.isFatal());
assertEquals(expectedType, error.getName());
numErrors++;
}
}
// Deliberately unsupported FileQuerySerialization implementation, used to trigger the
// client-side IllegalArgumentException path in queryInputOutputIA.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// upload without overwrite=true must fail when the file already exists (created in setup()).
@Test
public void uploadInputStreamOverwriteFails() {
StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
.verifyError(IllegalArgumentException.class);
}
// upload with overwrite=true replaces the existing file; read back and compare bytes.
@Test
public void uploadInputStreamOverwrite() {
fc.upload(DATA.getDefaultBinaryData(), null, true).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
// Smoke test: a 20 MB upload with a 1 MB single-shot threshold (forcing chunked upload)
// must complete without throwing.
@SuppressWarnings("deprecation")
// NOTE(review): the condition string below is an unterminated literal — it appears truncated
// in this copy; restore the full @EnabledIf condition from the original source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
.setParallelTransferOptions(pto)).block());
}
// Verifies the upload chunking strategy by counting appendWithResponse calls via an
// anonymous subclass spy; expected counts come from uploadNumberOfAppendsSupplier.
@SuppressWarnings("deprecation")
// NOTE(review): the condition string below is an unterminated literal — it appears truncated
// in this copy; restore the full @EnabledIf condition from the original source.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger numAppendsCounter = new AtomicInteger(0);
// Spy: intercepts each append to count it, then delegates to the real implementation.
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
numAppendsCounter.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxSingleUploadSizeLong(singleUploadSize);
StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
.setParallelTransferOptions(pto)))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, numAppendsCounter.get());
}
// Arguments: dataSize, singleUploadSize, blockSize, expected append count.
// Defaults: single-shot threshold 100 MB, block size 4 MB (hence the ceil division case).
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
// The upload response value must carry a non-null ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
assertNotNull(fc.uploadWithResponse(
new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
.getValue().getETag());
}
// A per-call pipeline policy overriding x-ms-version must affect both blob- and dfs-endpoint
// operations (getProperties and getAccessControl respectively).
@Test
public void perCallPolicy() {
DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
.addPolicy(getPerCallVersionPolicy())
.buildFileAsyncClient();
assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
.getValue(X_MS_VERSION));
assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
.getValue(X_MS_VERSION));
}
} |
I'd move the `compareFiles` call to outside the StepVerifier call, anything interacting with IO needs extra care when dealing with Reactor to prevent blocking threads. | public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
compareFiles(file, outFile, 0, fileSize);
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
} | compareFiles(file, outFile, 0, fileSize); | public void downloadFile(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null))
.assertNext(r -> {
assertEquals(fileSize, r.getValue().getFileSize());
})
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
} | class FileAsyncApiTests extends DataLakeTestBase {
private DataLakeFileAsyncClient fc;
private final List<File> createdFiles = new ArrayList<>();
private static final PathPermissions PERMISSIONS = new PathPermissions()
.setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
.setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
.setOther(new RolePermissions().setReadPermission(true));
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
// Creates a fresh file before each test so most tests can assume fc exists.
@BeforeEach
public void setup() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
// Best-effort removal of local files created during the test; failures are ignored.
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
createdFiles.forEach(File::delete);
}
// Minimal create() on a fresh path must emit a non-null PathInfo.
@Test
public void createMin() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.create())
.assertNext(r -> assertNotEquals(null, r))
.verifyComplete();
}
// createWithResponse with all-null arguments must return 201 with valid standard headers.
@Test
public void createDefaults() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
// An unsatisfiable If-Match condition must fail the create with DataLakeStorageException.
@Test
public void createError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createWithResponse(
null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
.verifyError(DataLakeStorageException.class);
}
// create(overwrite=false) on an existing file must fail.
@Test
public void createOverwrite() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.create(false))
.verifyError(DataLakeStorageException.class);
}
// exists() is true for a created file.
@Test
public void exists() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
// exists() is false for a path that was never created.
@Test
public void doesNotExist() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.exists())
.expectNext(false)
.verifyComplete();
}
// Verifies HTTP headers supplied at create time are returned by getProperties, and that a
// null content type defaults to application/octet-stream.
// Fix: added nullValues = "null" — without it JUnit passes the literal string "null" for each
// parameter, so the all-null row never exercised the default-content-type branch. This also
// matches the sibling @CsvSource usages in this class (e.g. createMetadata).
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"}, nullValues = "null")
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    // The service reports application/octet-stream when no content type was set.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
// Metadata supplied at create time must be returned verbatim by getProperties.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2021-04-10.
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
// Verifies the encryption context set at create time is echoed back by getProperties, read
// headers, and listPaths.
// Fix: the readWithResponse StepVerifier was built but never verified — without a terminal
// verify*() call StepVerifier does not subscribe, so its assertion never executed. Added
// .verifyComplete().
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createWithResponse(options, Context.NONE).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();
    // Two paths exist (the directory and the file); skip the first, assert on the second.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
// create must succeed (201) when every supplied access condition is satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// Satisfiable access-condition combinations: modified, unmodified, match, noneMatch, leaseID.
// RECEIVED_* sentinels are resolved to real values by the setupPath*Condition helpers.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
// create must fail with DataLakeStorageException when any access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// Unsatisfiable access-condition combinations, one violated condition per row.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
// create accepting octal permissions ("0777") and umask ("0057") must return 201.
@Test
public void createPermissionsAndUmask() {
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// JUnit @DisabledIf condition: true when the targeted service version predates 2020-12-06.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
// An ACL supplied via DataLakePathCreateOptions must be reflected by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only the user and group entries are asserted here.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
// Owner and group supplied at create time must be reflected by getAccessControl.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
// Creating without owner/group must leave both as the service default "$superuser".
// Fix: the createWithResponse Mono was never subscribed — a bare call without .block() (or a
// StepVerifier) is a no-op in Reactor, so the re-create this test intends never executed and
// the assertions only held because setup() had already created the file. Added .block().
@Test
public void createOptionsWithNullOwnerAndGroup() {
    fc.createWithResponse(null, null).block();
    StepVerifier.create(fc.getAccessControl())
        .assertNext(r -> {
            assertEquals("$superuser", r.getOwner());
            assertEquals("$superuser", r.getGroup());
        })
        .verifyComplete();
}
// createWithResponse must accept a full PathHttpHeaders set (and an all-null set) and return 201.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Metadata supplied via DataLakePathCreateOptions must be present in getProperties output.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
// Subset check: service metadata may contain additional entries.
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
// Permissions "0777" masked by umask "0057" must yield effective permissions rwx-w----.
@Test
public void createOptionsWithPermissionsAndUmask() {
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
// Acquiring a lease at create time (proposed id + duration) must succeed with 201.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// A proposed lease id without a lease duration must be rejected by the service.
@Test
public void createOptionsWithLeaseIdError() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
// A 15-second lease acquired at create time must show as locked/leased/fixed in properties.
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    // Creating with (or without) an absolute scheduled-deletion time succeeds.
    DataLakePathCreateOptions createOptions =
        new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createWithResponse(createOptions, null), 201);
}
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
    // One absolute expiry a day from now, plus null for "no scheduled deletion".
    DataLakePathScheduleDeletionOptions absoluteExpiry =
        new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
    return Stream.of(absoluteExpiry, null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
    // Schedule deletion 6 days after creation; the reported expiry should be
    // creation time + 6 days (compared with reduced precision).
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(new DataLakePathScheduleDeletionOptions(Duration.ofDays(6)));
    assertAsyncResponseStatusCode(fc.createWithResponse(createOptions, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties ->
            compareDatesWithPrecision(properties.getExpiresOn(), properties.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void createIfNotExistsMin() {
    // createIfNotExists on a brand-new path should create it.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();

    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
    // Default options: expect 201 Created plus the standard response headers.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
        .assertNext(response -> {
            assertEquals(201, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
    // First call creates (201); a second call on the same path must not
    // overwrite and instead reports 409 Conflict.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());

    assertAsyncResponseStatusCode(
        fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null), 201);
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();

    assertAsyncResponseStatusCode(
        fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null), 409);
}
@Test
public void createIfNotExistsExists() {
    // After createIfNotExists the path must report as existing.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.createIfNotExists().block();
    assertTrue(fc.exists().block());
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"})
// Creates a file with the supplied HTTP headers and verifies they round-trip
// through getProperties. Content-MD5 is not part of this matrix (passed as null).
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// The service defaults an unset content type to application/octet-stream,
// so normalize the expected value before asserting.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
// Lambdas may only capture effectively-final locals, hence the copy.
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Creates a file with 0 or 2 metadata pairs and verifies getProperties
// returns exactly that metadata map.
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
    // Supplying permissions and umask on createIfNotExists is accepted (201).
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setPermissions("0777")
        .setUmask("0057");
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
    // Fresh file system with one directory and one file created with an
    // encryption context; the context must surface on properties, read
    // headers, and path listing.
    dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
    dataLakeFileSystemAsyncClient.create().block();
    dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    String encryptionContext = "encryptionContext";
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
    fc.createIfNotExistsWithResponse(options, Context.NONE).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();

    // BUG FIX: this StepVerifier was missing its terminal verifyComplete(),
    // so it was never subscribed and the read-header assertion never ran.
    StepVerifier.create(fc.readWithResponse(null, null, null, false))
        .assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
        .verifyComplete();

    // First listed path is the directory created above (skipped); the second
    // is the file carrying the encryption context.
    StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
        .expectNextCount(1)
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
// Creates a file with an explicit ACL and verifies the first two entries
// (user and group) round-trip through getAccessControl.
public void createIfNotExistsOptionsWithACL() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only user and group entries are asserted; other/mask are not checked here.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
// Creates a file with explicit owner/group ids and verifies both are
// reported back by getAccessControl.
public void createIfNotExistsOptionsWithOwnerAndGroup() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
    // When owner/group are left null, the service assigns "$superuser" to both.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setOwner(null)
        .setGroup(null);
    fc.createIfNotExistsWithResponse(createOptions, null).block();

    StepVerifier.create(fc.getAccessControl())
        .assertNext(accessControl -> {
            assertEquals("$superuser", accessControl.getOwner());
            assertEquals("$superuser", accessControl.getGroup());
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
// Creates a file with a full set of HTTP headers via options; only the
// 201 status is asserted here (round-tripping is covered elsewhere).
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
// Creates a file with metadata via options and verifies each supplied
// pair is present on getProperties (superset check, not exact equality).
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
    // Permissions 0777 under umask 0057 should yield effective ACL rwx-w----.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setPermissions("0777")
        .setUmask("0057");
    fc.createIfNotExistsWithResponse(createOptions, null).block();

    StepVerifier.create(fc.getAccessControlWithResponse(true, null, null))
        .assertNext(response -> assertEquals(
            PathPermissions.parseSymbolic("rwx-w----").toString(),
            response.getValue().getPermissions().toString()))
        .verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
    // Proposed lease id plus duration is accepted on createIfNotExists.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setProposedLeaseId(testResourceNamer.randomUuid())
        .setLeaseDuration(15);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
    // A proposed lease id without a duration must fail with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setProposedLeaseId(CoreUtils.randomUuid().toString());
    StepVerifier.create(fc.createIfNotExistsWithResponse(createOptions, null))
        .verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
// Creates a file with a 15-second fixed lease and verifies the lease
// status/state/duration reported by getProperties.
public void createIfNotExistsOptionsWithLeaseDuration() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
    // Scheduled-deletion options (absolute time or null) are accepted on create.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions =
        new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
    // Expiry relative to creation: expect creation time + 6 days (reduced precision).
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setScheduleDeletionOptions(new DataLakePathScheduleDeletionOptions(Duration.ofDays(6)));
    assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(createOptions, null), 201);

    StepVerifier.create(fc.getProperties())
        .assertNext(properties ->
            compareDatesWithPrecision(properties.getExpiresOn(), properties.getCreationTime().plusDays(6)))
        .verifyComplete();
}
@Test
public void deleteMin() {
    // Deleting the default test file succeeds with HTTP 200.
    assertAsyncResponseStatusCode(fc.deleteWithResponse(null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
    // After deletion, getProperties must fail with 404 BlobNotFound.
    fc.deleteWithResponse(null, null, null).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyErrorSatisfies(error -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(
            error, 404, BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// Delete succeeds (200) when every supplied access condition is satisfied.
// setupPathLeaseCondition/setupPathMatchCondition resolve sentinel values
// (e.g. received lease id / received ETag) against the live path.
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// Delete must fail when any access condition is violated. Note the lease is
// set up (acquired) but the raw, mismatching leaseID is passed as the condition.
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
    // The file exists, so deleteIfExists reports true.
    StepVerifier.create(fc.deleteIfExists())
        .expectNext(true)
        .verifyComplete();
}
@Test
public void deleteIfExistsMin() {
    // Minimal deleteIfExists on an existing file returns 200.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
    // After a successful deleteIfExists, the path is gone and getProperties errors.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // First delete succeeds (200); repeating it reports 404 without throwing.
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
    assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// deleteIfExists succeeds (200) when all access conditions are satisfied.
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// deleteIfExists must fail with a storage exception when any access
// condition is violated.
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
    // setPermissions returns updated path info with an ETag and last-modified time.
    StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
    // The WithResponse variant reports HTTP 200 on success.
    assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setPermissions succeeds (200) when all access conditions are satisfied.
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setPermissions must fail when any access condition is violated.
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
    // Targeting a path that was never created must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
    // setAccessControlList returns updated path info with ETag and last-modified.
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .assertNext(info -> {
            assertNotNull(info.getETag());
            assertNotNull(info.getLastModified());
        })
        .verifyComplete();
}
@Test
public void setACLWithResponse() {
    // The WithResponse variant reports HTTP 200 on success.
    assertAsyncResponseStatusCode(
        fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setAccessControlList succeeds (200) when all access conditions are satisfied.
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setAccessControlList must fail when any access condition is violated.
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
    // Setting an ACL on a path that was never created must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
        .verifyError(DataLakeStorageException.class);
}
// Guard used by @DisabledIf: true when the targeted service version predates
// 2020-02-10 (required by the recursive ACL operations below).
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive ACL set on a single file: expect exactly one changed file,
// no changed directories, no failures.
public void setACLRecursive() {
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive ACL update on a single file: one changed file, no directories,
// no failures.
public void updateACLRecursive() {
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
// Recursive ACL removal of mask/default/user/group entries on a single file:
// one changed file, no directories, no failures.
public void removeACLRecursive() {
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
    // The access-control result exposes ACL, permissions, owner, and group.
    StepVerifier.create(fc.getAccessControl())
        .assertNext(accessControl -> {
            assertNotNull(accessControl.getAccessControlList());
            assertNotNull(accessControl.getPermissions());
            assertNotNull(accessControl.getOwner());
            assertNotNull(accessControl.getGroup());
        })
        .verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
    // Without UPN resolution the call returns 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
    // Requesting user-principal-name resolution also returns 200.
    assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// getAccessControl succeeds (200) when all access conditions are satisfied.
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// getAccessControl must fail when any access condition is violated.
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
// The garbage-lease case is skipped: getAccessControl does not validate a
// lease id that does not correspond to an actual lease.
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
@Test
// Exhaustive check of the default property surface of a freshly created,
// never-written file: standard headers present, copy/lease/encryption
// fields unset, HOT access tier, empty metadata, not a directory.
public void getPropertiesDefault() {
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
PathProperties properties = r.getValue();
validateBasicHeaders(headers);
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNotNull(properties.getCreationTime());
assertNotNull(properties.getLastModified());
assertNotNull(properties.getETag());
// Size is non-negative (zero for an empty file).
assertTrue(properties.getFileSize() >= 0);
assertNotNull(properties.getContentType());
// No content headers were set at creation, so these are all absent.
assertNull(properties.getContentMd5());
assertNull(properties.getContentEncoding());
assertNull(properties.getContentDisposition());
assertNull(properties.getContentLanguage());
assertNull(properties.getCacheControl());
assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
assertNull(properties.getLeaseDuration());
// No copy operation has ever targeted this path.
assertNull(properties.getCopyId());
assertNull(properties.getCopyStatus());
assertNull(properties.getCopySource());
assertNull(properties.getCopyProgress());
assertNull(properties.getCopyCompletionTime());
assertNull(properties.getCopyStatusDescription());
assertTrue(properties.isServerEncrypted());
assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
assertEquals(AccessTier.HOT, properties.getAccessTier());
assertNull(properties.getArchiveStatus());
assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
assertNull(properties.getAccessTierChangeTime());
assertNull(properties.getEncryptionKeySha256());
assertFalse(properties.isDirectory());
})
.verifyComplete();
}
@Test
public void getPropertiesMin() {
    // Minimal getProperties call returns 200.
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// getProperties succeeds (200) when all access conditions are satisfied.
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// getProperties must fail when any access condition is violated.
// NOTE(review): unlike the other ACFail tests, this passes the RESOLVED result
// of setupPathLeaseCondition as the lease id instead of the raw leaseID —
// confirm this is intentional for the invalid-lease branch of the supplier.
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getPropertiesWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void getPropertiesError() {
    // getProperties on a never-created path fails with BlobNotFound.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException =
                assertInstanceOf(DataLakeStorageException.class, error);
            assertTrue(storageException.getMessage().contains("BlobNotFound"));
        });
}
@Test
public void setHTTPHeadersNull() {
    // Passing null headers is valid and clears/keeps defaults; expect 200
    // plus the standard response headers.
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            validateBasicHeaders(response.getHeaders());
        })
        .verifyComplete();
}
@Test
// Rebuilds the current header set from live properties, changes only the
// content type, and verifies the new type sticks.
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
PathProperties properties = fc.getProperties().block();
PathHttpHeaders headers = new PathHttpHeaders()
.setContentEncoding(properties.getContentEncoding())
.setContentDisposition(properties.getContentDisposition())
.setContentType("type")
.setCacheControl(properties.getCacheControl())
.setContentLanguage(properties.getContentLanguage())
// MD5 of the default test payload, Base64-encoded as the service expects.
.setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
fc.setHttpHeaders(headers).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals("type", r.getContentType()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    // Upload real content first so the Content-MD5 header set below matches data.
    // BUG FIX: append/flush return cold publishers; without block() they were
    // never subscribed, so nothing was ever written before the header check
    // (compare readAllNull, which blocks on both calls).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();

    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();

    // Every header set above must round-trip through getProperties.
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    // Two cases: all headers null, and a full header set whose MD5 matches
    // the default test payload.
    byte[] defaultContentMd5 =
        Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", defaultContentMd5, "type"));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setHttpHeaders succeeds (200) when all access conditions are satisfied.
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setHttpHeaders must fail when any access condition is violated.
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setHTTPHeadersError() {
    // Setting headers on a never-created path must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataMin() {
    // A single metadata pair set on the path must round-trip exactly.
    Map<String, String> expectedMetadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(expectedMetadata).block();

    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(expectedMetadata, properties.getMetadata()))
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
// Sets 0 or 2 metadata pairs and verifies the status code and that
// getProperties returns exactly the map that was set.
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
// setMetadata succeeds (200) when all access conditions are satisfied.
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
// setMetadata must fail when any access condition is violated.
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setMetadataWithResponse(null, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setMetadataError() {
    // Setting metadata on a never-created path must fail.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
@Test
// Writes the default payload, reads it back with no options, and checks
// both the body and the full default header surface of the read response.
public void readAllNull() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> {
// NOTE(review): this inner subscribe runs asynchronously inside assertNext;
// a failing assertArraysEqual would surface as an onError on that inner
// subscription, not necessarily fail this test — consider collecting the
// bytes and asserting on the test thread instead.
r.getValue().subscribe(piece -> {
TestUtils.assertArraysEqual(DATA.getDefaultBytes(), piece.array());
});
HttpHeaders headers = r.getHeaders();
// No metadata was set, so no x-ms-meta-* headers should be present.
assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
// No range requested and no content headers were set on the file.
assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
// No copy has ever targeted this path, so all copy headers are absent.
assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
assertNull(headers.getValue(X_MS_COPY_ID));
assertNull(headers.getValue(X_MS_COPY_PROGRESS));
assertNull(headers.getValue(X_MS_COPY_SOURCE));
assertNull(headers.getValue(X_MS_COPY_STATUS));
// No lease is held: no duration, state available, status unlocked.
assertNull(headers.getValue(X_MS_LEASE_DURATION));
assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
assertNotNull(headers.getValue(X_MS_CREATION_TIME));
assertNotNull(r.getDeserializedHeaders().getCreationTime());
})
.verifyComplete();
}
@Test
public void readEmptyFile() {
    // A freshly created, never-written file should read back as a zero-length buffer.
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(buffer -> assertEquals(0, buffer.array().length))
        .verifyComplete();
}
// When a mid-download failure triggers retries, the client re-requests the remaining range.
// MockRetryRangeResponsePolicy asserts the retry carries "bytes=2-6"; the injected failure
// ultimately surfaces to the subscriber as an IOException.
@Test
public void readWithRetryRange() {
DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
new MockRetryRangeResponsePolicy("bytes=2-6"));
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false))
.assertNext(r -> {
StepVerifier.create(r.getValue())
.verifyErrorSatisfies(p -> {
assertInstanceOf(IOException.class, p);
});
})
.verifyComplete();
}
@Test
public void readMin() {
    // Upload the default payload, then verify a plain read round-trips the exact bytes.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reading a byte range returns exactly the corresponding slice of the uploaded text.
// A null count means "from offset to end of file".
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
ByteArrayOutputStream readData = new ByteArrayOutputStream();
StepVerifier.create(fc.readWithResponse(range, null, null, false))
.assertNext(r -> {
// NOTE(review): fire-and-forget subscribe; the per-piece assertEquals also assumes the
// whole range arrives in one buffer — confirm against the intended verification pattern.
r.getValue().subscribe(piece -> {
try {
readData.write(piece.array());
assertEquals(expectedData, readData.toString());
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
});
})
.verifyComplete();
}
// Ranges to exercise: the full content (null count), a prefix, and a mid-file slice.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// read succeeds (200) when lease, ETag, and modification-time access conditions all match.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
// read fails with a storage error when an access condition is violated
// (none-match is deliberately set to the path's actual ETag).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.readWithResponse(null, null, drc, false))
.verifyError(DataLakeStorageException.class);
}
// Requesting content MD5 on a ranged read returns the Base64-encoded MD5 of exactly
// the requested 3-byte slice in the Content-MD5 header.
@Test
public void readMd5() throws NoSuchAlgorithmException {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
null, null, true))
.assertNext(r -> {
byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
try {
// Compute the expected digest locally over the same 3-byte prefix.
TestUtils.assertArraysEqual(
Base64.getEncoder().encode(
MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
contentMD5);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// A download that hits 5 injected transient failures still completes via the
// client's built-in retry behavior and yields the full payload.
@Test
public void readRetryDefault() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new MockFailureResponsePolicy(5));
ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
.assertNext(r -> {
try {
downloadData.write(r);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
assertEquals(DATA.getDefaultText(), downloadData.toString());
})
.verifyComplete();
}
@Test
public void downloadFileExists() throws IOException {
    // readToFile must refuse to overwrite an existing destination file by default,
    // failing with FileAlreadyExistsException wrapped in UncheckedIOException.
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    // FIX: the append/flush Monos were never subscribed (missing .block()), so no data
    // was ever uploaded; block() matches the sibling tests (e.g. downloadFileExistsSucceeds).
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// With overwrite=true, readToFile replaces an existing destination file and the
// file's content matches the uploaded payload.
@Test
public void downloadFileExistsSucceeds() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// readToFile creates the destination when it does not exist and writes the full payload.
@Test
public void downloadFileDoesNotExist() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (testFile.exists()) {
assertTrue(testFile.delete());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(fc.readToFile(testFile.getPath(), true))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// Explicit OpenOptions (CREATE/READ/WRITE, no TRUNCATE) are honored by
// readToFileWithResponse; the downloaded content matches the upload.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
// With TRUNCATE_EXISTING in the OpenOptions, an existing destination is overwritten
// and ends up containing exactly the uploaded payload.
@Test
public void downloadFileExistOpenOptions() throws IOException {
File testFile = new File(prefix + ".txt");
testFile.deleteOnExit();
createdFiles.add(testFile);
if (!testFile.exists()) {
assertTrue(testFile.createNewFile());
}
fc.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
null, null, false, openOptions))
.assertNext(r -> {
try {
assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// File sizes for the download tests: tiny, exactly 16 MB, just over 8 MB, and 50 MB.
// NOTE(review): 1026 (not 1024) appears deliberate — the same factor is used in the
// progress-reporting tests below; confirm against the original intent.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(20, 16 * Constants.MB, 8 * 1026 * 1024 + 10, 50 * Constants.MB);
}
// Downloads a file of the given size through readToFileWithResponse using a 4 MB block
// size, then verifies the reported file size and byte-for-byte content.
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source — confirm the full annotation against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
String fileSystemName = generateFileSystemName();
DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
.endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
.credential(getDataLakeCredential())
.buildAsyncClient()
DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
.blockOptional()
.orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
null, null, false, null)
.map(Response::getValue))
.assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
// Downloading with an explicit FileRange writes exactly that slice of the source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
.verifyComplete();
}
// Ranges relative to the default payload: exact size, offset-by-one, a small interior
// slice, one byte short, and a count far past the end of the data.
private static Stream<FileRange> downloadFileRangeSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        new FileRange(0, size),
        new FileRange(1, size - 1),
        new FileRange(3, 2L),
        new FileRange(0, size - 1),
        new FileRange(0, 10 * 1024L));
}
// A range whose offset is past the end of the file fails with a storage error.
@Test
public void downloadFileRangeFail() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
null, false, null))
.verifyError(DataLakeStorageException.class);
}
// A FileRange with offset 0 and no count downloads the whole file.
@Test
public void downloadFileCountNull() {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
null, null, null, false, null))
.assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
.verifyComplete();
}
// readToFileWithResponse succeeds when all request conditions (lease, ETag,
// modification times) are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(testResourceNamer.randomName("", 60));
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setLeaseId(setupPathLeaseCondition(fc, leaseID));
assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
null, null, bro, false, null).block());
}
// readToFileWithResponse fails when a request condition is violated; the error code
// is either ConditionNotMet or LeaseIdMismatchWithBlobOperation depending on which
// condition failed.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
File file = getRandomFile(DATA.getDefaultDataSize());
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bro = new DataLakeRequestConditions()
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setLeaseId(leaseID);
StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
null, bro, false, null))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
"LeaseIdMismatchWithBlobOperation"));
});
}
// Verifies ETag locking during a parallel download: a pipeline policy overwrites the
// source file after the first response, so a later chunk request must fail with 412
// (precondition failed) and the partially written destination must be cleaned up.
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source — confirm the full annotation against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
File file = getRandomFile(Constants.MB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
Files.deleteIfExists(outFile.toPath());
outFile.deleteOnExit();
createdFiles.add(outFile);
AtomicInteger counter = new AtomicInteger();
// Separate client used to mutate the file mid-download.
DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// After the very first response, upload new content to invalidate the ETag.
HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
if (counter.incrementAndGet() == 1) {
return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
}
return Mono.just(response);
});
DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
.addPolicy(policy)
.endpoint(fc.getPathUrl())
.credential(getDataLakeCredential()))
.buildFileAsyncClient();
// Small block size forces multiple chunk requests so the ETag check can trip.
ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
null, null, false, null))
.verifyErrorSatisfies(ex -> {
// The 412 may be wrapped/composited by Reactor; unwrap before inspecting.
assertTrue(Exceptions.unwrapMultiple(ex).stream()
.anyMatch(ex2 -> {
Throwable unwrapped = Exceptions.unwrap(ex2);
if (unwrapped instanceof DataLakeStorageException) {
return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
}
return false;
}));
});
sleepIfRunningAgainstService(500);
// The failed download must not leave a partial file behind.
assertFalse(outFile.exists());
}
// Progress reported through the deprecated ProgressReceiver must reach exactly fileSize,
// never exceed it, and increase monotonically.
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source — confirm the full annotation against the original file.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockReceiver mockReceiver = new MockReceiver();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressReceiver(mockReceiver),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockReceiver.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every cumulative progress value reported during a transfer so tests can
// assert on the sequence. Uses the deprecated ProgressReceiver API deliberately.
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
// Read directly by the tests above; do not rename.
List<Long> progresses = new ArrayList<>();
@Override
public void reportProgress(long bytesTransferred) {
progresses.add(bytesTransferred);
}
}
// Same progress invariants as downloadFileProgressReceiver, but via the current
// ProgressListener API: reaches exactly fileSize, never exceeds it, monotonic.
// NOTE(review): the @EnabledIf condition string below appears truncated in this copy of
// the source — confirm the full annotation against the original file.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(), true).block();
File outFile = new File(prefix);
outFile.deleteOnExit();
createdFiles.add(outFile);
if (outFile.exists()) {
assertTrue(outFile.delete());
}
MockProgressListener mockListener = new MockProgressListener();
fc.readToFileWithResponse(outFile.toPath().toString(), null,
new ParallelTransferOptions().setProgressListener(mockListener),
new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
long prevCount = -1;
for (long progress : mockListener.progresses) {
assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
prevCount = progress;
}
}
// Records every cumulative progress value reported via the ProgressListener API.
private static final class MockProgressListener implements ProgressListener {
// Read directly by the tests above; do not rename.
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
}
@Test
public void renameMin() {
    // A rename within the same file system reports 201 Created.
    assertAsyncResponseStatusCode(
        fc.renameWithResponse(null, generatePathName(), null, null, null), 201);
}
// After a rename, the returned client can fetch properties (200) and the
// original client's path no longer exists.
@Test
public void renameWithResponse() {
StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
// Renaming into a different file system moves the path; the source path then 404s.
@Test
public void renameFilesystemWithResponse() {
DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null))
.assertNext(r -> {
StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
.assertNext(p -> assertEquals(p.getStatusCode(), 200))
.verifyComplete();
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.verifyErrorSatisfies(r -> {
assertInstanceOf(DataLakeStorageException.class, r);
});
}
@Test
public void renameError() {
    // Renaming a path that was never created must fail with a storage error.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier
        .create(fc.renameWithResponse(null, generatePathName(), null, null, null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    // Paths containing percent-encoded characters must survive a rename.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination,
            null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            // FIX: the original chained flatMap(...) was never subscribed (and its lambda
            // returned null, which is illegal for flatMap), so the 200-status check never
            // ran. Verify eagerly with a nested StepVerifier, matching renameWithResponse.
            StepVerifier.create(r.getValue().getPropertiesWithResponse(null))
                .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
                .verifyComplete();
        })
        .verifyComplete();
}
// Rename succeeds (201) when the access conditions applied to the SOURCE path are met.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
null, null), 201);
}
// Rename fails when a SOURCE-path access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
null, null))
.verifyError(DataLakeStorageException.class);
}
// Rename succeeds (201) when the access conditions applied to the DESTINATION path
// (which already exists here) are met.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(destFile, leaseID))
.setIfMatch(setupPathMatchCondition(destFile, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
drc, null), 201);
}
// Rename fails when a DESTINATION-path access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
String pathName = generatePathName();
DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
setupPathLeaseCondition(destFile, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
.verifyError(DataLakeStorageException.class);
}
// A client authenticated with a file-system SAS (including move permission) can
// rename, and the destination client is usable afterwards.
@Test
public void renameSasToken() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
// Same as renameSasToken, but the SAS string carries a leading '?' — the client must
// tolerate that form as well.
@Test
public void renameSasTokenWithLeadingQuestionMark() {
FileSystemSasPermission permissions = new FileSystemSasPermission()
.setReadPermission(true)
.setMovePermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setAddPermission(true)
.setDeletePermission(true);
String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
StepVerifier.create(destClient.getPropertiesWithResponse(null))
.assertNext(r -> assertEquals(r.getStatusCode(), 200))
.verifyComplete();
}
@Test
public void appendDataMin() {
    // A minimal append of the default payload at offset 0 completes without throwing.
    assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
// appendWithResponse returns 202 with request id, service version, date, and the
// server-encryption flag set.
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Supplying a correct transactional MD5 with the append is accepted (202).
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
// Client-side validation: null data or a declared length that disagrees with the
// actual stream length raises the expected exception type before any request is sent.
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Null data and over-/under-stated declared lengths for append().
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    return Stream.of(
        Arguments.of(null, size, NullPointerException.class),
        Arguments.of(DATA.getDefaultFlux(), size + 1, UnexpectedLengthException.class),
        Arguments.of(DATA.getDefaultFlux(), size - 1, UnexpectedLengthException.class));
}
@Test
public void appendDataEmptyBody() {
    // The service rejects a zero-length append body.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
    // A null body is rejected client-side with NPE before any request is made.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.append(null, 0, 0)).verifyError(NullPointerException.class);
}
// Appending with the correct active lease id succeeds (202).
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Appending with a wrong lease id against a leased path fails with 412.
@Test
public void appendDataLeaseFail() {
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
// @DisabledIf guard for features that require service version 2020-08-04+
// (e.g. lease actions on append).
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// LeaseAction.ACQUIRE on append takes a fixed 15s lease as part of the append;
// the path's properties afterwards report a locked, leased, fixed-duration lease.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// LeaseAction.AUTO_RENEW renews an existing lease during the append; the lease
// stays locked/leased/fixed afterwards.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// FIX(consistency): LeaseAction requires service version 2020-08-04+; the three sibling
// lease-action tests carry this guard but this one did not, so it would run (and fail)
// against older service versions.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
    // LeaseAction.RELEASE frees the lease as part of the append; afterwards the path
    // reports an unlocked, available lease.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    // NOTE(review): setFlush(true) is paired with RELEASE here and in the ACQUIRE_RELEASE
    // test — presumably the release happens on flush; confirm against service docs.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
// LeaseAction.ACQUIRE_RELEASE acquires a lease for the duration of the append and
// releases it on completion, leaving the path unlocked/available.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
public void appendDataError() {
    // Appending to a path that was never created must surface a 404 from the service.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException
                = assertInstanceOf(DataLakeStorageException.class, error);
            assertEquals(404, storageException.getResponse().getStatusCode());
        });
}
@Test
public void appendDataRetryOnTransientFailure() {
    // A pipeline policy injects one transient failure per request; the client's retry
    // policy should recover and the appended data should round-trip intact.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Requires service version 2019-12-12 or newer (flush-on-append).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
    // append with setFlush(true) should both stage and commit in one call, so a
    // subsequent read returns the data without an explicit flush.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
@Test
public void appendBinaryDataMin() {
    // Minimal append of BinaryData with default options must complete without error.
    Mono<Void> appendOperation = fc.append(DATA.getDefaultBinaryData(), 0);
    assertDoesNotThrow(appendOperation::block);
}
@Test
public void appendBinaryData() {
    // appendWithResponse should return 202 and the standard service response headers.
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
// Requires service version 2019-12-12 or newer (flush-on-append).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
    // Same as appendBinaryData but with setFlush(true); only the response is validated here.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            assertEquals(202, r.getStatusCode());
            assertNotNull(headers.getValue(X_MS_REQUEST_ID));
            assertNotNull(headers.getValue(X_MS_VERSION));
            assertNotNull(headers.getValue(HttpHeaderName.DATE));
            assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
        })
        .verifyComplete();
}
@Test
public void flushDataMin() {
    // Stage data first; a flush with overwrite enabled should then complete cleanly.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    Mono<?> flushOperation = fc.flush(DATA.getDefaultDataSizeLong(), true);
    assertDoesNotThrow(flushOperation::block);
}
@Test
public void flushClose() {
    // Flush with close=true (second boolean arg is retainUncommittedData=false) should succeed.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        true, null, null).block());
}
@Test
public void flushRetainUncommittedData() {
    // Flush with retainUncommittedData=true and close=false should succeed.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
        false, null, null).block());
}
@Test
public void flushIA() {
    // Flushing with a position (4) that does not match the staged data length must be
    // rejected by the service.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fc.create().block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flushWithResponse(4, false, false, null,
        null))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    // HTTP headers supplied at flush time should be persisted on the path.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
    // The service defaults an unset content type to application/octet-stream.
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    String finalContentType = contentType;
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            null, finalContentType))
        .verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Flush must succeed when every supplied access condition is satisfied.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // setupPath* helpers translate sentinel values into real lease IDs / ETags.
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
        false, null, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Flush must fail when any supplied access condition is violated.
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    // Acquire a real lease (when leaseID is a sentinel) so the stale leaseID used below mismatches.
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency fix: use the long-valued size accessor like every other flush test
    // (previously this was the only call site using the int-valued getDefaultDataSize()).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void flushError() {
    // Flushing a path that does not exist fails with a storage exception.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.flush(1, true))
        .expectError(DataLakeStorageException.class)
        .verify();
}
@Test
public void flushDataOverwrite() {
    // First flush (overwrite=true) succeeds; flushing again without overwrite must fail
    // because data is already committed at that position.
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
        .verifyError(DataLakeStorageException.class);
}
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
    "%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
    // URL-encoded and Unicode file names should be decoded/normalized by the client.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
    assertEquals(finalFileName, client.getFilePath());
}
@Test
public void builderBearerTokenValidation() {
    // Bearer-token credentials require HTTPS; building a client against an http
    // endpoint must throw IllegalArgumentException.
    String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
    assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint(endpoint)
        .buildFileAsyncClient());
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
    // Round-trips a local file: upload with the given block size, download to a
    // sibling file, then compare byte-for-byte.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fac.uploadFromFile(file.getPath(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
        .verifyComplete();
    File outFile = new File(file.getPath() + "result");
    assertTrue(outFile.createNewFile());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    StepVerifier.create(fac.readToFile(outFile.getPath(), true))
        .expectNextCount(1)
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// Arguments: fileSize, blockSize (null -> client default block size).
private static Stream<Arguments> uploadFromFileSupplier() {
    return Stream.of(
        Arguments.of(10, null),
        Arguments.of(10 * Constants.KB, null),
        Arguments.of(50 * Constants.MB, null),
        Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
    );
}
@Test
public void uploadFromFileWithMetadata() throws IOException {
    // Metadata supplied to uploadFromFile should be persisted, and the content should round-trip.
    Map<String, String> metadata = Collections.singletonMap("metadata", "value");
    File file = getRandomFile(Constants.KB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> {
            try {
                // Compare downloaded bytes against the on-disk source file.
                TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
            } catch (IOException e) {
                // Checked IOException cannot escape the lambda; rethrow unchecked.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
@Test
public void uploadFromFileDefaultNoOverwrite() {
    // Uploading to an existing path without opting into overwrite must fail, both for
    // the shared client (fc) and a freshly created file client (fac).
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Fix: track the second temp file for cleanup (it was previously created inline
    // and never registered with deleteOnExit/createdFiles, leaking a temp file).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void uploadFromFileOverwrite() {
    // With overwrite=true, uploading over an existing path must succeed for both clients.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Fix: track the second temp file for cleanup (it was previously created inline
    // and never registered with deleteOnExit/createdFiles, leaking a temp file).
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
    // Last cumulative byte count reported by the upload pipeline.
    private long reportedByteCount;
    @Override
    public void reportProgress(long bytesTransferred) {
        // Keep only the latest value; the final report equals the total bytes sent.
        this.reportedByteCount = bytesTransferred;
    }
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
// ProgressListener-based counterpart of FileUploadReporter (non-deprecated API).
private static final class FileUploadListener implements ProgressListener {
    // Last cumulative byte count reported by the upload pipeline.
    private long reportedByteCount;
    @Override
    public void handleProgress(long bytesTransferred) {
        this.reportedByteCount = bytesTransferred;
    }
    long getReportedByteCount() {
        return this.reportedByteCount;
    }
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
    // The deprecated ProgressReceiver should see the full file size reported.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadReporter.getReportedByteCount());
}
// Arguments: size, blockSize, bufferCount.
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
    );
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
    // ProgressListener variant of uploadFromFileReporter; the listener should see
    // the full file size reported.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
    File file = getRandomFile(size);
    file.deleteOnExit();
    createdFiles.add(file);
    // MaxSingleUploadSize below blockSize forces the chunked (multi-append) path.
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(blockSize - 1);
    StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
        null, null))
        .verifyComplete();
    assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
    // Upload honoring single-shot vs. chunked thresholds; verifies final file size only.
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// Arguments: dataSize, singleUploadSize, blockSize (null -> default).
// dataSize > singleUploadSize in both cases, exercising the chunked upload path.
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
    return Stream.of(
        Arguments.of(100, 50L, null),
        Arguments.of(100, 50L, 20L)
    );
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
    // uploadFromFileWithResponse should return 200 with ETag/Last-Modified populated,
    // and the resulting file size should match the uploaded data size.
    File file = getRandomFile(dataSize);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            assertNotNull(r.getValue().getETag());
            assertNotNull(r.getValue().getLastModified());
        })
        .verifyComplete();
    // BUG FIX: this StepVerifier was previously built but never subscribed (missing
    // verifyComplete()), so the file-size assertion never actually executed.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(dataSize, r.getFileSize()))
        .verifyComplete();
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
    // Uploading a single empty buffer without overwrite against an existing path fails.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
        .verifyError(DataLakeStorageException.class);
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
    byte[] expectedDownload) {
    // Empty buffers interleaved in the source Flux must be skipped without corrupting output.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
        null, true))
        .assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
        .verifyComplete();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
        .verifyComplete();
}
// Arguments: buffer1, buffer2, buffer3, expectedDownload — each row places the empty
// buffer (or none) at a different position in the three-buffer stream.
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
    ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
    byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
    byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
    return Stream.of(
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
        Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
    );
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
    // Buffered (chunked) upload with varying buffer sizes/concurrency, then read-back check.
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    byte[] data = getRandomByteArray(dataSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize)
        .setMaxConcurrency(numBuffs)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
    // Skip read-back for very large payloads (>= 100 MB) to keep the test tractable.
    if (dataSize < 100 * 1024 * 1024) {
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
            .assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
            .verifyComplete();
    }
}
// Arguments: dataSize, bufferSize, numBuffs (concurrency).
private static Stream<Arguments> asyncBufferedUploadSupplier() {
    return Stream.of(
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
        Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
        Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
        Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
    );
}
// Asserts that `result` is exactly the concatenation of `buffers`, in order.
// Walks `result` by sliding its limit window over each source buffer; mutates the
// position/limit of every buffer involved.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
    result.position(0);
    for (ByteBuffer buffer : buffers) {
        buffer.position(0);
        // Window `result` to exactly this buffer's length before comparing.
        result.limit(result.position() + buffer.remaining());
        TestUtils.assertByteBuffersEqual(buffer, result);
        result.position(result.position() + buffer.remaining());
    }
    // No trailing bytes may remain beyond the concatenated sources.
    assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
// Counts progress callbacks, asserting each reported total is block-aligned.
private static final class Reporter implements ProgressReceiver {
    private final long blockSize;
    private long reportingCount;
    Reporter(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void reportProgress(long bytesTransferred) {
        // Only fires with -ea; reported totals are expected to be multiples of blockSize.
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
// ProgressListener-based counterpart of Reporter (non-deprecated API).
private static final class Listener implements ProgressListener {
    private final long blockSize;
    private long reportingCount;
    Listener(long blockSize) {
        this.blockSize = blockSize;
    }
    @Override
    public void handleProgress(long bytesTransferred) {
        // Only fires with -ea; reported totals are expected to be multiples of blockSize.
        assert bytesTransferred % blockSize == 0;
        this.reportingCount += 1;
    }
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
    // The deprecated ProgressReceiver should be invoked at least once per block.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressReceiver(uploadReporter)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadReporter.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// Arguments: size, blockSize, bufferCount.
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
    return Stream.of(
        Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
        Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
        Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
        Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
    );
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
    // ProgressListener variant of bufferedUploadWithReporter.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(bufferCount)
        .setProgressListener(uploadListener)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
        null, null))
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertTrue(uploadListener.reportingCount >= (size / blockSize));
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
    // Uploads a multi-buffer source whose chunk sizes differ from the transfer block
    // size, then verifies the download equals the concatenated source buffers.
    DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
        .getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
        .createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created."));
    DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
        .setBlockSizeLong(bufferSize * Constants.MB)
        .setMaxConcurrency(numBuffers)
        .setMaxSingleUploadSizeLong(4L * Constants.MB);
    // Sizes are expressed in MB by the supplier.
    List<ByteBuffer> dataList = dataSizeList.stream()
        .map(size -> getRandomData(size * Constants.MB))
        .collect(Collectors.toList());
    Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
        .then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Arguments: dataSizeList (chunk sizes in MB), bufferSize (MB), numBuffers.
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
    return Stream.of(
        Arguments.of(Arrays.asList(7, 7), 10L, 2),
        Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
        Arguments.of(Arrays.asList(10, 10), 10L, 2),
        Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
    );
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
    // Chunk sizes straddling the 4 MB single-shot threshold should pick the right
    // upload path (single-shot vs. chunked) and still round-trip the data.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
    // Same as bufferedUploadHandlePathing but with a hot (publish/autoConnect) source,
    // which cannot be re-subscribed by the upload machinery.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists chosen around the 4 MB single-shot threshold.
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB), Collections.singletonList(4 * Constants.MB));
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
    // Hot-source upload through a pipeline that injects transient failures; retries
    // must not lose or duplicate chunks.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
    // A separate, failure-free client reads back the committed content.
    DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
        new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
        .then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
    StepVerifier.create(uploadOperation)
        .assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
        .verifyComplete();
}
// Chunk-size lists chosen around the 4 MB single-shot threshold (no single-element case).
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Stream.of(Arrays.asList(10, 100, 1000, 10000), Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB));
}
@SuppressWarnings("deprecation")
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
    // InputStream-based upload through a failure-injecting pipeline; sizes cover both
    // the single-shot (< 2 MB) and chunked (> 2 MB) paths.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    byte[] data = getRandomByteArray(dataSize);
    clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
        .setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
            .setBlockSizeLong(2L * Constants.MB))).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(data, readArray);
}
@Test
public void bufferedUploadIllegalArgumentsNull() {
    // A null data Flux must be rejected with NullPointerException.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    StepVerifier.create(fac.upload((Flux<ByteBuffer>) null,
        new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4), true))
        .verifyError(NullPointerException.class);
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
    String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
    throws NoSuchAlgorithmException {
    // HTTP headers (optionally including a content MD5) supplied at upload time should
    // be persisted on the path.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    byte[] randomData = getRandomByteArray(dataSize);
    byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
    Mono<Response<PathProperties>> uploadOperation = fac
        .uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
            new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
            new PathHttpHeaders()
                .setCacheControl(cacheControl)
                .setContentDisposition(contentDisposition)
                .setContentEncoding(contentEncoding)
                .setContentLanguage(contentLanguage)
                .setContentMd5(contentMD5)
                .setContentType(contentType), null, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
            contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
        .verifyComplete();
}
// Arguments: dataSize, cacheControl, contentDisposition, contentEncoding,
// contentLanguage, validateContentMD5, contentType.
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Stream.of(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    );
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
    // Metadata supplied at upload time should be persisted on the path (empty map when
    // no key/value pairs are provided).
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null) {
        metadata.put(key2, value2);
    }
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
        .setMaxConcurrency(10);
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, metadata, null)
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(metadata, response.getValue().getMetadata());
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    // Verifies the chunking strategy by counting how many append calls the upload issues.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger appendCount = new AtomicInteger(0);
    // Anonymous subclass intercepts appendWithResponse to count invocations, delegating
    // to the real implementation.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            appendCount.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
        new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
        null, null, null))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, appendCount.get());
}
@Test
public void bufferedUploadPermissionsAndUmask() {
    // Upload with POSIX permissions/umask options should succeed and commit all bytes.
    // NOTE(review): only status code and file size are asserted here — the resulting
    // permissions themselves are not verified.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(
        new FileParallelUploadOptions(Flux.just(getRandomData(10))).setPermissions("0777").setUmask("0057"))
        .then(fac.getPropertiesWithResponse(null));
    StepVerifier.create(uploadOperation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
// NOTE(review): the @EnabledIf value below appears truncated in this source (unterminated
// string literal) — restore the full condition method reference from the repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    // Buffered upload must succeed when every supplied access condition is satisfied.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // setupPath* helpers translate sentinel values into real lease IDs / ETags.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(setupPathMatchCondition(fac, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
// Verifies buffered upload fails with 412 (Precondition Failed) when access
// conditions are not met.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fac, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyErrorSatisfies(ex -> {
            DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
            assertEquals(412, exception.getStatusCode());
        });
}
// Verifies that a failing (garbage lease) multi-buffer upload surfaces the
// service error rather than deadlocking the upload buffer pool.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("Could not create file"));
    // Garbage lease ID guarantees every append is rejected by the service.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
        setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
    ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxConcurrency(numBuffers);
    StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
        parallelTransferOptions, null, null, requestConditions))
        .verifyError(DataLakeStorageException.class);
}
// Verifies upload without the overwrite flag fails when the file already exists.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fac.upload(DATA.getDefaultFlux(), null).block();
    StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
        .verifyError(IllegalArgumentException.class);
}
// Verifies uploadFromFile with overwrite=true succeeds both on an existing file
// (fc, created in setup) and on a fresh path (fac).
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    // BUGFIX: uploadFromFile returns a lazy Mono; without block() the lambda only
    // built the Mono and the upload never ran, so assertDoesNotThrow proved nothing.
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // BUGFIX: the second temp file was previously created inline and never
    // registered for cleanup, leaking a 50-byte file per run.
    File overwriteFile = getRandomFile(50);
    overwriteFile.deleteOnExit();
    createdFiles.add(overwriteFile);
    StepVerifier.create(fac.uploadFromFile(overwriteFile.toPath().toString(), true))
        .verifyComplete();
}
// Verifies a non-markable (non-replayable) file-backed Flux uploads correctly:
// upload then read back to a second file and compare byte-for-byte.
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    Flux<ByteBuffer> stream = FluxUtil.readFile(AsynchronousFileChannel.open(file.toPath()), 0, file.length());
    fc.upload(stream, null, true).block();
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
// Verifies upload from an InputStream without an explicit length succeeds and
// the content round-trips intact.
@Test
public void uploadInputStreamNoLength() {
    assertDoesNotThrow(() ->
        fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies upload rejects a declared length that does not match the stream
// (zero, negative, one short, one long).
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    assertThrows(Exception.class, () -> fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), length)).block());
}
// Invalid declared lengths for uploadInputStreamBadLength: zero, negative,
// one byte short of the real size, and one byte past it.
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    long actualSize = DATA.getDefaultDataSizeLong();
    return Stream.of(0L, -100L, actualSize - 1, actualSize + 1);
}
// Verifies upload succeeds despite transient HTTP failures injected by a
// pipeline policy (i.e. the retry path replays request bodies correctly).
@Test
public void uploadSuccessfulRetry() {
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies upload from BinaryData round-trips the default payload.
@Test
public void uploadBinaryData() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(
        () -> client.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultBinaryData())).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies BinaryData upload with overwrite=true replaces the existing file.
@Test
public void uploadBinaryDataOverwrite() {
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Verifies the encryption context set at upload time is returned by getProperties.
// Requires service version 2021-04-10 or newer.
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    String encryptionContext = "encryptionContext";
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext);
    fc.uploadWithResponse(options).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
/**
 * Uploads a CSV fixture to {@code fc} for the quick-query tests: an optional
 * header row followed by {@code numCopies} copies of a fixed two-row data block,
 * using the separators configured in {@code s}.
 *
 * @param s delimited serialization whose separators/header flag shape the fixture
 * @param numCopies number of copies of the two-row CSV body to concatenate
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String columnSeparator = Character.toString(s.getColumnSeparator());
    String header = "rn1" + columnSeparator + "rn2" + columnSeparator + "rn3" + columnSeparator + "rn4"
        + s.getRecordSeparator();
    // FIX: use an explicit charset — the no-arg getBytes() uses the platform
    // default charset (pre-JDK 18), which is not guaranteed to be UTF-8.
    byte[] headers = header.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    String csv = "100" + columnSeparator + "200" + columnSeparator + "300" + columnSeparator + "400"
        + s.getRecordSeparator() + "300" + columnSeparator + "400" + columnSeparator + "500" + columnSeparator
        + "600" + s.getRecordSeparator();
    byte[] csvData = csv.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    // Lay the CSV body copies end-to-end after the (optional) header.
    for (int i = 0; i < numCopies; i++) {
        int o = i * csvData.length + headerLength;
        System.arraycopy(csvData, 0, data, o, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
/**
 * Uploads a small JSON object fixture to {@code fc} for the quick-query tests:
 * {@code numCopies} "nameN": "ownerN" entries wrapped in braces.
 *
 * @param numCopies number of key/value lines to emit
 */
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int copy = 0; copy < numCopies; copy++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", copy, copy));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    // Content is ASCII, so the char count equals the byte count for the flush offset.
    fc.flush(json.length(), true).block();
}
// Verifies the minimal query path: SELECT * over a plain CSV blob returns the
// same bytes as a plain read.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    32
})
public void queryMin(int numCopies) {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        // Accumulate the query output ByteBuffers into one byte stream.
        ByteArrayOutputStream queryData = fc.query(expression).reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
            try {
                outputStream.write(piece.array());
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
            return outputStream;
        }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Verifies querying with various record/column separators and header flags.
// When headers are consumed on input but not emitted on output, the result is
// the input minus the 16-byte header row.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
    boolean headersPresentOut) {
    FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentIn);
    FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
        .setRecordSeparator(recordSeparator)
        .setColumnSeparator(columnSeparator)
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(headersPresentOut);
    uploadCsv(serIn, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(serIn).setOutputSerialization(serOut))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        if (headersPresentIn && !headersPresentOut) {
            assertEquals(readArray.length - 16, queryArray.length);
            /* Account for 16 bytes of header. */
            TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
        } else {
            TestUtils.assertArraysEqual(readArray, queryArray);
        }
    });
}
// Cases for queryCsvSerializationSeparator:
// (recordSeparator, columnSeparator, headersPresentIn, headersPresentOut).
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    List<Arguments> cases = new ArrayList<>();
    // Default separators with every header in/out combination that is exercised.
    cases.add(Arguments.of('\n', ',', false, false));
    cases.add(Arguments.of('\n', ',', true, true));
    cases.add(Arguments.of('\n', ',', true, false));
    // Unusual record separators, default column separator.
    cases.add(Arguments.of('\t', ',', false, false));
    cases.add(Arguments.of('\r', ',', false, false));
    cases.add(Arguments.of('<', ',', false, false));
    cases.add(Arguments.of('>', ',', false, false));
    cases.add(Arguments.of('&', ',', false, false));
    cases.add(Arguments.of('\\', ',', false, false));
    // Unusual column separators.
    cases.add(Arguments.of(',', '.', false, false));
    cases.add(Arguments.of(',', ';', false, false));
    cases.add(Arguments.of('\n', '\t', false, false));
    cases.add(Arguments.of('\n', '<', false, false));
    cases.add(Arguments.of('\n', '>', false, false));
    cases.add(Arguments.of('\n', '&', false, false));
    cases.add(Arguments.of('\n', '\\', false, false));
    return cases.stream();
}
// Verifies querying succeeds when escape char and field quote are configured,
// and the result matches a plain read.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser))
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Verifies SELECT * over a JSON blob returns the blob content followed by a
// record separator (hence the extra byte 10, '\n', appended to readData).
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
    FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
        .setRecordSeparator(recordSeparator);
    uploadSmallJson(numCopies);
    String expression = "SELECT * from BlobStorage";
    ByteArrayOutputStream readData = fc.read().reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
        try {
            outputStream.write(piece.array());
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
        return outputStream;
    }).block();
    // Query output terminates the record with '\n'; append it to the expectation.
    readData.write(10);
    byte[] readArray = readData.toByteArray();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser).setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Cases for queryInputJson: (numCopies, recordSeparator) with a newline
// record separator at increasing fixture sizes.
private static Stream<Arguments> queryInputJsonSupplier() {
    return Stream.of(0, 10, 100, 1000)
        .map(copies -> Arguments.of(copies, '\n'));
}
// Verifies cross-format query: CSV input serialized to JSON output produces
// the expected JSON object for the first record.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
    liveTestScenarioWithRetry(() -> {
        FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        uploadCsv(inSer, 1);
        FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        // Only the first record is compared; trailing output is ignored.
        TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
    });
}
// Verifies cross-format query: JSON input serialized to CSV output.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
    liveTestScenarioWithRetry(() -> {
        FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
        uploadSmallJson(2);
        FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
            .setRecordSeparator('\n')
            .setColumnSeparator(',')
            .setEscapeChar('\0')
            .setFieldQuote('\0')
            .setHeadersPresent(false);
        String expression = "SELECT * from BlobStorage";
        byte[] expectedData = "owner0,owner1\n".getBytes();
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
            .setOutputSerialization(outSer);
        byte[] queryArray = fc.queryWithResponse(optionsOs)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
        TestUtils.assertArraysEqual(expectedData, queryArray);
    });
}
// Verifies a query with Arrow output serialization completes without error.
// Output content is not validated, only that the call succeeds.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
    FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(inSer, 32);
    List<FileQueryArrowField> schema = Collections.singletonList(
        new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
    FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
    String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
    liveTestScenarioWithRetry(() -> {
        OutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
        assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
    });
}
// Verifies a column-separator mismatch produces non-fatal per-record errors
// delivered to the error consumer rather than failing the query.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    // Upload with '.' separators, then query with ',' to provoke errors.
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
    liveTestScenarioWithRetry(() -> {
        MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
        // NOTE(review): .block().getValue() would NPE on an empty Mono — presumably
        // queryWithResponse always emits; confirm if flakiness appears here.
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(base.setColumnSeparator(','))
            .setOutputSerialization(base.setColumnSeparator(','))
            .setErrorConsumer(receiver2)).block().getValue().blockLast());
        assertTrue(receiver2.numErrors > 0);
    });
}
// Verifies that querying CSV content with a JSON input serialization causes a
// fatal error, surfaced when the result body is consumed.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(true);
    uploadCsv(base.setColumnSeparator('.'), 32);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
            new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
            .assertNext(r -> {
                // The response itself succeeds; the failure occurs while draining the body.
                assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
            })
            .verifyComplete();
    });
}
// Verifies the progress consumer eventually reports the full blob size scanned.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
    FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(base.setColumnSeparator('.'), 32);
    long sizeofBlobToRead = fc.getProperties().block().getFileSize();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        // Drain the whole result so all progress callbacks fire.
        fc.queryWithResponse(options).block().getValue().blockLast();
        assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
    });
}
// Verifies that over a large (512000-copy) blob, progress reports arrive in
// non-decreasing order.
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    String expression = "SELECT * from BlobStorage";
    uploadCsv(ser, 512000);
    liveTestScenarioWithRetry(() -> {
        MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
        long temp = 0;  // last progress value seen; progress must be monotonic
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
        fc.queryWithResponse(options).block().getValue().blockLast();
        for (long progress : mockReceiver.progressList) {
            assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
            temp = progress;
        }
    });
}
// Verifies an unsupported serialization type (as input or output) is rejected
// with IllegalArgumentException before any request is sent.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
    /* Mock random impl of QQ Serialization*/
    FileQuerySerialization ser = new RandomOtherSerialization();
    FileQuerySerialization inSer = input ? ser : null;
    FileQuerySerialization outSer = output ? ser : null;
    String expression = "SELECT * from BlobStorage";
    // CLEANUP: removed the commented-out StepVerifier variant of this assertion;
    // the assertThrows form below is the one in use.
    liveTestScenarioWithRetry(() -> {
        assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream())
                .setInputSerialization(inSer)
                .setOutputSerialization(outSer)).block());
    });
}
// Verifies Arrow serialization is rejected as an INPUT format (it is output-only).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setInputSerialization(inSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
// @DisabledIf guard: true when the targeted service version predates 2020-10-02.
private static boolean olderThan20201002ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2020_10_02);
}
// Verifies Parquet serialization is rejected as an OUTPUT format (it is input-only).
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.queryWithResponse(
            new FileQueryOptions(expression, new ByteArrayOutputStream()).setOutputSerialization(outSer)))
            .verifyError(IllegalArgumentException.class);
    });
}
// Verifies querying a non-existent file fails with DataLakeStorageException.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
    // Re-point fc at a path that was never created.
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    liveTestScenarioWithRetry(() -> {
        StepVerifier.create(fc.query("SELECT * from BlobStorage"))
            .verifyError(DataLakeStorageException.class);
    });
}
// Verifies query succeeds when all access conditions are satisfied.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setRequestConditions(bac)).block());
    });
}
/**
 * Runs {@code runnable} directly in playback/record mode; in live mode retries
 * it up to 5 times with a 5-second pause between attempts.
 *
 * @param runnable the test scenario to execute
 * @throws RuntimeException the last failure, if all 5 live-mode attempts fail
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    int retry = 0;
    while (retry < 5) {
        try {
            runnable.run();
            break;
        } catch (RuntimeException ex) {
            // Runnable.run can only throw unchecked exceptions, so catching
            // RuntimeException loses nothing versus catching Exception.
            retry++;
            if (retry >= 5) {
                // BUGFIX: previously the loop exited silently after 5 failures,
                // letting a failing live test pass. Surface the last failure.
                throw ex;
            }
            sleepIfRunningAgainstService(5000);
        }
    }
}
// Verifies query fails with DataLakeStorageException when access conditions
// are not met.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bac = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    String expression = "SELECT * from BlobStorage";
    StepVerifier.create(fc.queryWithResponse(
        new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
        .verifyError(DataLakeStorageException.class);
}
// Verifies scheduleDeletion sets (or clears) the file's expiry according to the
// given options.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(hasExpiry, fileAsyncClient.getProperties().block().getExpiresOn() != null);
}
// Cases for scheduleDeletion: (options, whether an expiry should be set).
// Relative offsets set an expiry; empty options or null do not.
private static Stream<Arguments> scheduleDeletionSupplier() {
    List<Arguments> cases = new ArrayList<>();
    cases.add(Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true));
    cases.add(Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true));
    cases.add(Arguments.of(new FileScheduleDeletionOptions(), false));
    cases.add(Arguments.of(null, false));
    return cases.stream();
}
// @DisabledIf guard: true when the targeted service version predates 2019-12-12.
private static boolean olderThan20191212ServiceVersion() {
    return olderThan(DataLakeServiceVersion.V2019_12_12);
}
// Verifies an absolute expiry time round-trips (service truncates to seconds).
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void scheduleDeletionTime() {
    OffsetDateTime now = testResourceNamer.now();
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
}
// Verifies scheduleDeletion on a non-existent file fails with a service error.
@Test
public void scheduleDeletionError() {
    FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
        .verifyError(DataLakeStorageException.class);
}
// Test double that records every bytes-scanned progress value in arrival order.
static class MockProgressReceiver implements Consumer<FileQueryProgress> {
    // Final + synchronized: progress callbacks may be invoked on a reactor
    // worker thread while the test thread later reads the list.
    // NOTE(review): callback threading is not guaranteed by this file — the
    // synchronization is defensive; confirm against the client implementation.
    final List<Long> progressList = Collections.synchronizedList(new ArrayList<>());

    @Override
    public void accept(FileQueryProgress progress) {
        progressList.add(progress.getBytesScanned());
    }
}
// Test double that asserts every reported query error is non-fatal and of the
// expected type, counting occurrences for the test to inspect.
static class MockErrorReceiver implements Consumer<FileQueryError> {
    final String expectedType;  // immutable after construction — made final
    int numErrors;

    MockErrorReceiver(String expectedType) {
        this.expectedType = expectedType;
        this.numErrors = 0;
    }

    @Override
    public void accept(FileQueryError error) {
        assertFalse(error.isFatal());
        assertEquals(expectedType, error.getName());
        numErrors++;
    }
}
// Deliberately-unsupported FileQuerySerialization implementation, used by
// queryInputOutputIA to trigger the client's IllegalArgumentException path.
private static final class RandomOtherSerialization implements FileQuerySerialization {
}
// Verifies upload without overwrite fails on the existing file created in setup().
@Test
public void uploadInputStreamOverwriteFails() {
    StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
        .verifyError(IllegalArgumentException.class);
}
// Verifies upload with overwrite=true replaces the file and content round-trips.
@Test
public void uploadInputStreamOverwrite() {
    fc.upload(DATA.getDefaultBinaryData(), null, true).block();
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
}
// Verifies a 20 MB stream upload succeeds when the single-shot threshold (1 MB)
// forces the chunked upload path.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void uploadInputStreamLargeData() {
    ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
    ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
    assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
        .setParallelTransferOptions(pto)).block());
}
// Verifies an InputStream-based upload issues the expected number of append
// calls for the given transfer options, mirroring bufferedUploadOptions.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    AtomicInteger numAppendsCounter = new AtomicInteger(0);
    // Anonymous subclass intercepts appendWithResponse so each service append is counted.
    DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
        @Override
        Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
            DataLakeFileAppendOptions appendOptions, Context context) {
            numAppendsCounter.incrementAndGet();
            return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
        }
    };
    ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
    ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
        .setMaxSingleUploadSizeLong(singleUploadSize);
    StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
        .setParallelTransferOptions(pto)))
        .expectNextCount(1)
        .verifyComplete();
    StepVerifier.create(fac.getProperties())
        .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
        .verifyComplete();
    assertEquals(numAppends, numAppendsCounter.get());
}
// Cases shared by bufferedUploadOptions and uploadNumAppends:
// (dataSize, maxSingleUploadSize, blockSize, expected append count).
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
    int hundredMb = 100 * Constants.MB;
    int defaultBlockSize = 4 * Constants.MB;
    // Just under the single-shot limit -> one append; just over -> one append
    // per default-sized block; explicit thresholds exercise the other paths.
    return Stream.of(
        Arguments.of(hundredMb - 1, null, null, 1),
        Arguments.of(hundredMb + 1, null, null,
            (int) Math.ceil(((double) hundredMb + 1) / (double) defaultBlockSize)),
        Arguments.of(100, 50L, null, 1),
        Arguments.of(100, 50L, 20L, 5));
}
// Verifies uploadWithResponse returns path info including a non-null ETag.
@SuppressWarnings("deprecation")
@Test
public void uploadReturnValue() {
    assertNotNull(fc.uploadWithResponse(
        new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
        .getValue().getETag());
}
// Verifies a per-call pipeline policy can override the x-ms-version header on
// both dfs and blob endpoints of the client.
@Test
public void perCallPolicy() {
    DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
        .addPolicy(getPerCallVersionPolicy())
        .buildFileAsyncClient();
    assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
        .getValue(X_MS_VERSION));
    assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
        .getValue(X_MS_VERSION));
}
} | class FileAsyncApiTests extends DataLakeTestBase {
// File client under test; recreated for every test in setup().
private DataLakeFileAsyncClient fc;
// Local temp files created by tests; deleted in cleanup().
private final List<File> createdFiles = new ArrayList<>();
// rwxr-xr-- style permission set used by access-control tests.
private static final PathPermissions PERMISSIONS = new PathPermissions()
    .setOwner(new RolePermissions().setReadPermission(true).setWritePermission(true).setExecutePermission(true))
    .setGroup(new RolePermissions().setReadPermission(true).setExecutePermission(true))
    .setOther(new RolePermissions().setReadPermission(true));
// null means "do not change" group/owner in the calls that take them.
private static final String GROUP = null;
private static final String OWNER = null;
private static final List<PathAccessControlEntry> PATH_ACCESS_CONTROL_ENTRIES =
    PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
// Creates a fresh file before each test so fc points at an existing path.
@BeforeEach
public void setup() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
}
// Best-effort deletion of local temp files created during the test.
@SuppressWarnings("ResultOfMethodCallIgnored")
@AfterEach
public void cleanup() {
    createdFiles.forEach(File::delete);
}
// Verifies the minimal create() call emits a non-null result.
@Test
public void createMin() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.create())
        .assertNext(r -> assertNotEquals(null, r))
        .verifyComplete();
}
// Verifies createWithResponse with all-default arguments returns 201 and the
// standard ETag/last-modified headers.
@Test
public void createDefaults() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, null))
        .assertNext(r -> {
            assertEquals(201, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}
// Verifies create fails with a service error when an impossible ETag match
// condition is supplied.
@Test
public void createError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.createWithResponse(
        null, null, null, null, new DataLakeRequestConditions().setIfMatch("garbage")))
        .verifyError(DataLakeStorageException.class);
}
// Verifies create(false) fails when the file already exists.
@Test
public void createOverwrite() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.create(false))
        .verifyError(DataLakeStorageException.class);
}
// Verifies exists() emits true for a created file.
@Test
public void exists() {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    StepVerifier.create(fc.exists())
        .expectNext(true)
        .verifyComplete();
}
// Verifies exists() emits false for a path that was never created.
@Test
public void doesNotExist() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.exists())
        .expectNext(false)
        .verifyComplete();
}
// Verifies HTTP headers set at create time are returned by getProperties.
// A null content type defaults to application/octet-stream.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void createHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, String contentType) {
    PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentType(contentType);
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    contentType = (contentType == null) ? "application/octet-stream" : contentType;
    fc.createWithResponse(null, null, headers, null, null).block();
    String finalContentType = contentType;  // effectively-final copy for the lambda
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
                null, finalContentType);
        })
        .verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createMetadata(String key1, String value1, String key2, String value2) {
// Metadata supplied at create time must be returned verbatim by getProperties.
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc.createWithResponse(null, null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
// Guard for @DisabledIf: encryption context requires service version 2021-04-10+.
private static boolean olderThan20210410ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2021_04_10);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createEncryptionContext() {
// Encryption context set at create time is surfaced by getProperties, read, and listPaths.
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// First emission is the directory created above; the file under test is the second path.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void createAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition (lease, etag, modified-time) lets create succeed.
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.createWithResponse(null, null, null, null, drc), 201);
}
// One satisfiable condition per row; nulls mean the condition is unused.
private static Stream<Arguments> modifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(null, null, null, null, null),
Arguments.of(OLD_DATE, null, null, null, null),
Arguments.of(null, NEW_DATE, null, null, null),
Arguments.of(null, null, RECEIVED_ETAG, null, null),
Arguments.of(null, null, null, GARBAGE_ETAG, null),
Arguments.of(null, null, null, null, RECEIVED_LEASE_ID)
);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void createACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each unsatisfiable access condition must make create fail.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.createWithResponse(null, null, null, null, drc))
.verifyError(DataLakeStorageException.class);
}
// One unsatisfiable condition per row; mirrors modifiedMatchAndLeaseIdSupplier.
private static Stream<Arguments> invalidModifiedMatchAndLeaseIdSupplier() {
return Stream.of(
Arguments.of(NEW_DATE, null, null, null, null),
Arguments.of(null, OLD_DATE, null, null, null),
Arguments.of(null, null, GARBAGE_ETAG, null, null),
Arguments.of(null, null, null, RECEIVED_ETAG, null),
Arguments.of(null, null, null, null, GARBAGE_LEASE_ID)
);
}
@Test
public void createPermissionsAndUmask() {
// Octal permissions plus umask are accepted on the 5-argument create overload.
assertAsyncResponseStatusCode(fc.createWithResponse(
"0777", "0057", null, null, null), 201);
}
// Guard for @DisabledIf: options-based create features require service version 2020-12-06+.
private static boolean olderThan20201206ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_12_06);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithACL() {
// ACL entries passed via DataLakePathCreateOptions are readable back via getAccessControl.
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only the first two entries are checked; service may normalize the rest.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithOwnerAndGroup() {
// Owner and group set at create time are reflected in the access control response.
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createOptionsWithNullOwnerAndGroup() {
// When no owner/group is specified the service defaults both to $superuser.
// Bug fix: the original discarded the Mono returned by createWithResponse without
// subscribing, so the create call never actually executed; block() runs it.
fc.createWithResponse(null, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, byte[] contentMD5, String contentType) {
// HTTP headers supplied through DataLakePathCreateOptions are accepted (201).
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createOptionsWithMetadata(String key1, String value1, String key2, String value2) {
// Metadata supplied through options is present (as a superset check) on getProperties.
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createOptionsWithPermissionsAndUmask() {
// Permissions 0777 masked by umask 0057 should yield rwx-w---- on the path.
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseId() {
// Proposing a lease id together with a duration is accepted at create time.
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
@Test
public void createOptionsWithLeaseIdError() {
// A proposed lease id without a lease duration is rejected by the service.
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithLeaseDuration() {
// A 15s fixed lease acquired at create time shows as LOCKED/LEASED/FIXED in properties.
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
// Absolute expiry (and null, i.e. no expiry) are both accepted.
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
}
// Supplies an absolute expiry one day out, plus null for "no scheduled deletion".
private static Stream<DataLakePathScheduleDeletionOptions> timeExpiresOnOptionsSupplier() {
return Stream.of(new DataLakePathScheduleDeletionOptions(OffsetDateTime.now().plusDays(1)), null);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createOptionsWithTimeToExpireRelativeToNow() {
// Relative expiry: expiresOn should land 6 days after the creation time.
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void createIfNotExistsMin() {
// createIfNotExists on a fresh path creates the file.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@Test
public void createIfNotExistsDefaults() {
// Default options yield 201 with standard headers.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null))
.assertNext(r -> {
assertEquals(201, r.getStatusCode());
validateBasicHeaders(r.getHeaders());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOverwrite() {
// First call creates (201); second call is a no-op conflict (409), never an overwrite.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
201);
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null),
409);
}
@Test
public void createIfNotExistsExists() {
// After createIfNotExists, the path reports as existing.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExists().block();
// Consistency fix: verify via StepVerifier like every other test in this class
// instead of assertTrue(fc.exists().block()), which NPEs if the Mono is empty.
StepVerifier.create(fc.exists())
.expectNext(true)
.verifyComplete();
}
@ParameterizedTest
// nullValues = "null" is required so the first row actually passes null arguments;
// without it the literal string "null" is sent as each header value and the
// default-content-type branch below is never exercised.
@CsvSource(value = {"null,null,null,null,null", "control, disposition, encoding, language, type"}, nullValues = "null")
public void createIfNotExistsHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
// Headers supplied via options on createIfNotExists round-trip through getProperties.
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
// The service substitutes this default when no content type is supplied.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setPathHttpHeaders(headers), null).block();
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding,
contentLanguage, null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsMetadata(String key1, String value1, String key2, String value2) {
// Metadata supplied via options is returned verbatim by getProperties.
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions().setMetadata(metadata), Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
}
@Test
public void createIfNotExistsPermissionsAndUmask() {
// Permissions and umask options are accepted on createIfNotExists (201).
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(new DataLakePathCreateOptions()
.setPermissions("0777").setUmask("0057"), Context.NONE), 201);
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void createIfNotExistsEncryptionContext() {
// Encryption context is surfaced by getProperties, read, and listPaths (mirrors createEncryptionContext).
dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
dataLakeFileSystemAsyncClient.create().block();
dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String encryptionContext = "encryptionContext";
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setEncryptionContext(encryptionContext);
fc.createIfNotExistsWithResponse(options, Context.NONE).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
StepVerifier.create(fc.readWithResponse(null, null, null, false))
.assertNext(r -> assertEquals(encryptionContext, r.getDeserializedHeaders().getEncryptionContext()))
.verifyComplete();
// First emission is the directory created above; the file under test is the second path.
StepVerifier.create(dataLakeFileSystemAsyncClient.listPaths(new ListPathsOptions().setRecursive(true)))
.expectNextCount(1)
.assertNext(r -> assertEquals(encryptionContext, r.getEncryptionContext()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithACL() {
// ACL entries passed via options on createIfNotExists are readable via getAccessControl.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<PathAccessControlEntry> pathAccessControlEntries = PathAccessControlEntry.parseList("user::rwx,group::r--,other::---,mask::rwx");
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setAccessControlList(pathAccessControlEntries);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
// Only the first two entries are checked; service may normalize the rest.
assertEquals(pathAccessControlEntries.get(0), r.getAccessControlList().get(0));
assertEquals(pathAccessControlEntries.get(1), r.getAccessControlList().get(1));
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithOwnerAndGroup() {
// Owner and group set through options are reflected in the access control response.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String ownerName = testResourceNamer.randomUuid();
String groupName = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(ownerName).setGroup(groupName);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals(ownerName, r.getOwner());
assertEquals(groupName, r.getGroup());
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithNullOwnerAndGroup() {
// Null owner/group defaults both to $superuser.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setOwner(null).setGroup(null);
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertEquals("$superuser", r.getOwner());
assertEquals("$superuser", r.getGroup());
})
.verifyComplete();
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null,application/octet-stream", "control,disposition,encoding,language,null,type"},
nullValues = "null")
public void createIfNotExistsOptionsWithPathHttpHeaders(String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) {
// HTTP headers supplied through options are accepted (201).
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
PathHttpHeaders putHeaders = new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType);
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPathHttpHeaders(putHeaders);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void createIfNotExistsOptionsWithMetadata(String key1, String value1, String key2, String value2) {
// Metadata supplied through options is present (superset check) on getProperties.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null && value1 != null) {
metadata.put(key1, value1);
}
if (key2 != null && value2 != null) {
metadata.put(key2, value2);
}
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setMetadata(metadata);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
for (String k : metadata.keySet()) {
assertTrue(r.getMetadata().containsKey(k));
assertEquals(metadata.get(k), r.getMetadata().get(k));
}
})
.verifyComplete();
}
@Test
public void createIfNotExistsOptionsWithPermissionsAndUmask() {
// Permissions 0777 masked by umask 0057 should yield rwx-w---- on the path.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setPermissions("0777").setUmask("0057");
fc.createIfNotExistsWithResponse(options, null).block();
StepVerifier.create(fc.getAccessControlWithResponse(
true, null, null))
.assertNext(r -> assertEquals(PathPermissions.parseSymbolic("rwx-w----").toString(),
r.getValue().getPermissions().toString()))
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseId() {
// Proposing a lease id together with a duration is accepted (201).
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = testResourceNamer.randomUuid();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId).setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@Test
public void createIfNotExistsOptionsWithLeaseIdError() {
// A proposed lease id without a lease duration is rejected by the service.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setProposedLeaseId(leaseId);
StepVerifier.create(fc.createIfNotExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithLeaseDuration() {
// A 15s fixed lease acquired at create time shows as LOCKED/LEASED/FIXED in properties.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
String leaseId = CoreUtils.randomUuid().toString();
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setLeaseDuration(15).setProposedLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20201206ServiceVersion")
@ParameterizedTest
@MethodSource("timeExpiresOnOptionsSupplier")
public void createIfNotExistsOptionsWithTimeExpiresOn(DataLakePathScheduleDeletionOptions deletionOptions) {
// Absolute expiry (and null, i.e. no expiry) are both accepted.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathCreateOptions options = new DataLakePathCreateOptions().setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
}
@DisabledIf("olderThan20201206ServiceVersion")
@Test
public void createIfNotExistsOptionsWithTimeToExpireRelativeToNow() {
// Relative expiry: expiresOn should land 6 days after the creation time.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
DataLakePathScheduleDeletionOptions deletionOptions = new DataLakePathScheduleDeletionOptions(Duration.ofDays(6));
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setScheduleDeletionOptions(deletionOptions);
assertAsyncResponseStatusCode(fc.createIfNotExistsWithResponse(options, null), 201);
StepVerifier.create(fc.getProperties())
.assertNext(r -> compareDatesWithPrecision(r.getExpiresOn(), r.getCreationTime().plusDays(6)))
.verifyComplete();
}
@Test
public void deleteMin() {
// Deleting the setup-created file returns 200.
assertAsyncResponseStatusCode(fc.deleteWithResponse(
null, null, null), 200);
}
@Test
public void deleteFileDoesNotExistAnymore() {
// After delete, getProperties fails with 404 BLOB_NOT_FOUND.
fc.deleteWithResponse(null, null, null).block();
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyErrorSatisfies(r -> DataLakeTestBase.assertExceptionStatusCodeAndMessage(r, 404,
BlobErrorCode.BLOB_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition lets delete succeed (200).
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.deleteWithResponse(drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each unsatisfiable access condition must make delete fail.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.deleteWithResponse(drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExists() {
// deleteIfExists on an existing file reports true.
StepVerifier.create(fc.deleteIfExists())
.expectNext(true)
.verifyComplete();
}
@Test
public void deleteIfExistsMin() {
// With-response variant returns 200 when the file exists.
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
}
@Test
public void deleteIfExistsFileDoesNotExistAnymore() {
// After deleteIfExists, getProperties fails.
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
StepVerifier.create(fc.getPropertiesWithResponse(null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
// First delete succeeds (200); a second delete is a soft failure (404), not an exception.
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 200);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(null, null), 404);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition lets deleteIfExists succeed (200).
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setIsRecursive(false).setRequestConditions(drc);
assertAsyncResponseStatusCode(fc.deleteIfExistsWithResponse(options, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void deleteIfExistsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each unsatisfiable access condition must make deleteIfExists fail.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
DataLakePathDeleteOptions options = new DataLakePathDeleteOptions().setRequestConditions(drc);
StepVerifier.create(fc.deleteIfExistsWithResponse(options, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsMin() {
// setPermissions returns updated path info with ETag and last-modified populated.
StepVerifier.create(fc.setPermissions(PERMISSIONS, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setPermissionsWithResponse() {
// With-response variant returns 200.
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null),
200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setPermissionsAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition lets setPermissions succeed (200).
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setPermissionsACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each unsatisfiable access condition must make setPermissions fail.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setPermissionsError() {
// setPermissions on a nonexistent path fails with a service exception.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setPermissionsWithResponse(PERMISSIONS, GROUP, OWNER, null))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLMin() {
// setAccessControlList returns updated path info with ETag and last-modified populated.
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.assertNext(r -> {
assertNotNull(r.getETag());
assertNotNull(r.getLastModified());
})
.verifyComplete();
}
@Test
public void setACLWithResponse() {
// With-response variant returns 200.
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(
PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setAclAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition lets setAccessControlList succeed (200).
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc),
200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setAclACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each unsatisfiable access condition must make setAccessControlList fail.
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.setAccessControlListWithResponse(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER, drc))
.verifyError(DataLakeStorageException.class);
}
@Test
public void setACLError() {
// setAccessControlList on a nonexistent path fails with a service exception.
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES, GROUP, OWNER))
.verifyError(DataLakeStorageException.class);
}
// Guard for @DisabledIf: recursive ACL APIs require service version 2020-02-10+.
private static boolean olderThan20200210ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_02_10);
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void setACLRecursive() {
// On a single file, recursive set changes exactly one file, no directories, no failures.
StepVerifier.create(fc.setAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void updateACLRecursive() {
// Recursive update on a single file reports the same counters as a recursive set.
StepVerifier.create(fc.updateAccessControlRecursive(PATH_ACCESS_CONTROL_ENTRIES))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@DisabledIf("olderThan20200210ServiceVersion")
@Test
public void removeACLRecursive() {
// Recursive remove of mask/default/user/group entries changes exactly the one file.
List<PathRemoveAccessControlEntry> removeAccessControlEntries = PathRemoveAccessControlEntry.parseList(
"mask,default:user,default:group,user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "group:ec3595d6-2c17-4696-8caa-7e139758d24a,default:user:ec3595d6-2c17-4696-8caa-7e139758d24a,"
+ "default:group:ec3595d6-2c17-4696-8caa-7e139758d24a");
StepVerifier.create(fc.removeAccessControlRecursive(removeAccessControlEntries))
.assertNext(r -> {
assertEquals(0L, r.getCounters().getChangedDirectoriesCount());
assertEquals(1L, r.getCounters().getChangedFilesCount());
assertEquals(0L, r.getCounters().getFailedChangesCount());
})
.verifyComplete();
}
@Test
public void getAccessControlMin() {
// getAccessControl populates ACL, permissions, owner, and group.
StepVerifier.create(fc.getAccessControl())
.assertNext(r -> {
assertNotNull(r.getAccessControlList());
assertNotNull(r.getPermissions());
assertNotNull(r.getOwner());
assertNotNull(r.getGroup());
})
.verifyComplete();
}
@Test
public void getAccessControlWithResponse() {
// With-response variant (no UPN resolution) returns 200.
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, null, null), 200);
}
@Test
public void getAccessControlReturnUpn() {
// Requesting user principal names instead of object ids also returns 200.
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
true, null, null), 200);
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getAccessControlAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
// Each satisfiable access condition lets getAccessControl succeed (200).
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.getAccessControlWithResponse(
false, drc, null), 200);
}
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getAccessControlACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match,
String noneMatch, String leaseID) {
// Skipped for garbage lease ids: this read operation does not validate the lease.
if (GARBAGE_LEASE_ID.equals(leaseID)) {
return;
}
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
StepVerifier.create(fc.getAccessControlWithResponse(false, drc, null))
.verifyError(DataLakeStorageException.class);
}
// Exhaustively checks the default property/header surface of a freshly created file:
// standard headers present, optional blob headers absent, lease unlocked/available,
// no copy state, HOT tier, empty metadata, and not a directory.
@Test
public void getPropertiesDefault() {
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> {
            HttpHeaders headers = r.getHeaders();
            PathProperties properties = r.getValue();
            validateBasicHeaders(headers);
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNotNull(properties.getCreationTime());
            assertNotNull(properties.getLastModified());
            assertNotNull(properties.getETag());
            assertTrue(properties.getFileSize() >= 0);
            assertNotNull(properties.getContentType());
            // Content settings were never set on this file, so they come back null.
            assertNull(properties.getContentMd5());
            assertNull(properties.getContentEncoding());
            assertNull(properties.getContentDisposition());
            assertNull(properties.getContentLanguage());
            assertNull(properties.getCacheControl());
            assertEquals(LeaseStatusType.UNLOCKED, properties.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, properties.getLeaseState());
            assertNull(properties.getLeaseDuration());
            // No copy operation has targeted this file.
            assertNull(properties.getCopyId());
            assertNull(properties.getCopyStatus());
            assertNull(properties.getCopySource());
            assertNull(properties.getCopyProgress());
            assertNull(properties.getCopyCompletionTime());
            assertNull(properties.getCopyStatusDescription());
            assertTrue(properties.isServerEncrypted());
            assertFalse(properties.isIncrementalCopy() != null && properties.isIncrementalCopy());
            assertEquals(AccessTier.HOT, properties.getAccessTier());
            assertNull(properties.getArchiveStatus());
            assertTrue(CoreUtils.isNullOrEmpty(properties.getMetadata()));
            assertNull(properties.getAccessTierChangeTime());
            assertNull(properties.getEncryptionKeySha256());
            assertFalse(properties.isDirectory());
        })
        .verifyComplete();
}

// Minimal smoke test: getPropertiesWithResponse on an existing file returns 200.
@Test
public void getPropertiesMin() {
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(null), 200);
}
// getProperties succeeds (200) when lease/ETag/date access conditions are all met.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void getPropertiesAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.getPropertiesWithResponse(drc), 200);
}

// getProperties fails when any access condition is violated (ifNoneMatch is set to
// the path's real ETag, making the negative condition fail).
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void getPropertiesACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.getPropertiesWithResponse(drc))
        .verifyError(DataLakeStorageException.class);
}

// getProperties on a path that was never created surfaces BlobNotFound.
@Test
public void getPropertiesError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException ex = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(ex.getMessage().contains("BlobNotFound"));
        });
}
// Setting null HTTP headers is accepted (200) and still returns the basic headers.
@Test
public void setHTTPHeadersNull() {
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, null))
        .assertNext(r -> {
            assertEquals(200, r.getStatusCode());
            validateBasicHeaders(r.getHeaders());
        })
        .verifyComplete();
}

// Changes only contentType while round-tripping the other current header values,
// then verifies the new content type is persisted.
@Test
public void setHTTPHeadersMin() throws NoSuchAlgorithmException {
    PathProperties properties = fc.getProperties().block();
    PathHttpHeaders headers = new PathHttpHeaders()
        .setContentEncoding(properties.getContentEncoding())
        .setContentDisposition(properties.getContentDisposition())
        .setContentType("type")
        .setCacheControl(properties.getCacheControl())
        .setContentLanguage(properties.getContentLanguage())
        .setContentMd5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())));
    fc.setHttpHeaders(headers).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals("type", r.getContentType()))
        .verifyComplete();
}
// Sets a full (or fully-null) set of HTTP headers on an uploaded file and verifies
// each value round-trips through getProperties.
//
// FIX: the original called fc.append(...)/fc.flush(...) without block(); a Mono is
// lazy, so the upload never executed (every sibling test, e.g. readAllNull, blocks
// on these calls). The MD5 check can only match if the data was actually flushed.
@ParameterizedTest
@MethodSource("setHTTPHeadersHeadersSupplier")
public void setHTTPHeadersHeaders(String cacheControl, String contentDisposition, String contentEncoding,
    String contentLanguage, byte[] contentMD5, String contentType) {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    PathHttpHeaders putHeaders = new PathHttpHeaders()
        .setCacheControl(cacheControl)
        .setContentDisposition(contentDisposition)
        .setContentEncoding(contentEncoding)
        .setContentLanguage(contentLanguage)
        .setContentMd5(contentMD5)
        .setContentType(contentType);
    fc.setHttpHeaders(putHeaders).block();
    StepVerifier.create(fc.getPropertiesWithResponse(null))
        .assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
            contentMD5, contentType))
        .verifyComplete();
}
// Argument sets for setHTTPHeadersHeaders: one row of all-null headers and one row
// with every header populated (the MD5 is the Base64-encoded digest of the default data).
private static Stream<Arguments> setHTTPHeadersHeadersSupplier() throws NoSuchAlgorithmException {
    byte[] md5 = Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()));
    return Stream.of(
        Arguments.of(null, null, null, null, null, null),
        Arguments.of("control", "disposition", "encoding", "language", md5, "type"));
}
// setHttpHeaders succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setHttpHeadersWithResponse(null, drc), 200);
}

// setHttpHeaders fails with DataLakeStorageException when a condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setHttpHeadersACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setHttpHeadersWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

// setHttpHeaders on a non-existent path errors.
@Test
public void setHTTPHeadersError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setHttpHeaders(null))
        .verifyError(DataLakeStorageException.class);
}
// setMetadata round-trips a single key/value pair through getProperties.
@Test
public void setMetadataMin() {
    Map<String, String> metadata = Collections.singletonMap("foo", "bar");
    fc.setMetadata(metadata).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// Sets zero or two metadata pairs (null CSV cells are skipped) and verifies both the
// response status and that exactly the supplied map is persisted.
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200"}, nullValues = "null")
public void setMetadataMetadata(String key1, String value1, String key2, String value2, int statusCode) {
    Map<String, String> metadata = new HashMap<>();
    if (key1 != null && value1 != null) {
        metadata.put(key1, value1);
    }
    if (key2 != null && value2 != null) {
        metadata.put(key2, value2);
    }
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(metadata, null), statusCode);
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> assertEquals(metadata, r.getMetadata()))
        .verifyComplete();
}

// setMetadata succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void setMetadataAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.setMetadataWithResponse(null, drc), 200);
}

// setMetadata fails with DataLakeStorageException when a condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void setMetadataACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.setMetadataWithResponse(null, drc))
        .verifyError(DataLakeStorageException.class);
}

// setMetadata on a non-existent path errors.
@Test
public void setMetadataError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.setMetadata(null))
        .verifyError(DataLakeStorageException.class);
}
// Reads the whole uploaded file with default options and asserts the response header
// surface: required headers present, blob/copy/lease extras absent, lease state
// available/unlocked, and the downloaded bytes equal what was uploaded.
@Test
public void readAllNull() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(null, null, null, false)
        .flatMap(r -> {
            HttpHeaders headers = r.getHeaders();
            // No metadata was set, so no x-ms-meta-* headers should appear.
            assertFalse(headers.stream().anyMatch(h -> h.getName().startsWith("x-ms-meta-")));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_LENGTH));
            assertNotNull(headers.getValue(HttpHeaderName.CONTENT_TYPE));
            // A full (rangeless) read carries no Content-Range.
            assertNull(headers.getValue(HttpHeaderName.CONTENT_RANGE));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_ENCODING));
            assertNull(headers.getValue(HttpHeaderName.CACHE_CONTROL));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_DISPOSITION));
            assertNull(headers.getValue(HttpHeaderName.CONTENT_LANGUAGE));
            assertNull(headers.getValue(X_MS_BLOB_SEQUENCE_NUMBER));
            assertNull(headers.getValue(X_MS_COPY_COMPLETION_TIME));
            assertNull(headers.getValue(X_MS_COPY_STATUS_DESCRIPTION));
            assertNull(headers.getValue(X_MS_COPY_ID));
            assertNull(headers.getValue(X_MS_COPY_PROGRESS));
            assertNull(headers.getValue(X_MS_COPY_SOURCE));
            assertNull(headers.getValue(X_MS_COPY_STATUS));
            assertNull(headers.getValue(X_MS_LEASE_DURATION));
            assertEquals(LeaseStateType.AVAILABLE.toString(), headers.getValue(X_MS_LEASE_STATE));
            assertEquals(LeaseStatusType.UNLOCKED.toString(), headers.getValue(X_MS_LEASE_STATUS));
            assertEquals("bytes", headers.getValue(HttpHeaderName.ACCEPT_RANGES));
            assertNull(headers.getValue(X_MS_BLOB_COMMITTED_BLOCK_COUNT));
            assertNotNull(headers.getValue(X_MS_SERVER_ENCRYPTED));
            assertNull(headers.getValue(X_MS_BLOB_CONTENT_MD5));
            assertNotNull(headers.getValue(X_MS_CREATION_TIME));
            assertNotNull(r.getDeserializedHeaders().getCreationTime());
            return FluxUtil.collectBytesInByteBufferStream(r.getValue());
        }))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
// Reading a zero-length file yields an empty buffer.
@Test
public void readEmptyFile() {
    fc = dataLakeFileSystemAsyncClient.createFile("emptyFile").block();
    StepVerifier.create(fc.read())
        .assertNext(r -> assertEquals(0, r.array().length))
        .verifyComplete();
}

// A mid-download failure with retries enabled re-requests the remaining range;
// MockRetryRangeResponsePolicy asserts the retry asks for "bytes=2-6" and the
// overall read surfaces an IOException.
@Test
public void readWithRetryRange() {
    DataLakeFileAsyncClient fileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getPathUrl(),
        new MockRetryRangeResponsePolicy("bytes=2-6"));
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fileAsyncClient.readWithResponse(new FileRange(2, 5L),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .verifyError(IOException.class);
}

// Minimal read: downloaded bytes equal the uploaded default data.
@Test
public void readMin() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// Reads a sub-range of the uploaded default data and asserts the bytes match the
// expected substring. A null count means "from offset to end of file".
//
// FIX: removed the unused local ByteArrayOutputStream readData — it was allocated
// but never written to or read.
@ParameterizedTest
@MethodSource("readRangeSupplier")
public void readRange(long offset, Long count, String expectedData) {
    FileRange range = (count == null) ? new FileRange(offset) : new FileRange(offset, count);
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(range, null, null, false)
        .flatMap(r -> FluxUtil.collectBytesInByteBufferStream(r.getValue())))
        .assertNext(bytes -> assertArrayEquals(expectedData.getBytes(), bytes))
        .verifyComplete();
}
// Ranges for readRange: full content (null count), a prefix, and a short mid-file slice.
private static Stream<Arguments> readRangeSupplier() {
    String text = DATA.getDefaultText();
    return Stream.of(
        Arguments.of(0L, null, text),
        Arguments.of(0L, 5L, text.substring(0, 5)),
        Arguments.of(3L, 2L, text.substring(3, 5)));
}
// read succeeds (200) when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void readAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}

// read fails with DataLakeStorageException when an access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void readACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.readWithResponse(null, null, drc, false))
        .verifyError(DataLakeStorageException.class);
}
// Requesting an MD5 on a ranged read returns a Content-MD5 header that equals the
// locally computed Base64-encoded MD5 of the same 3-byte slice.
@Test
public void readMd5() throws NoSuchAlgorithmException {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readWithResponse(new FileRange(0, 3L),
        null, null, true))
        .assertNext(r -> {
            byte[] contentMD5 = r.getHeaders().getValue(HttpHeaderName.CONTENT_MD5).getBytes();
            try {
                TestUtils.assertArraysEqual(
                    Base64.getEncoder().encode(
                        MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().substring(0, 3).getBytes())),
                    contentMD5);
            } catch (NoSuchAlgorithmException e) {
                // MD5 is a required JDK algorithm; rethrow unchecked inside the lambda.
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// With the default retry behavior, a pipeline that fails the first five responses
// (MockFailureResponsePolicy) still yields the complete, correct file contents.
//
// FIX: removed the unused local ByteArrayOutputStream downloadData — it was
// allocated but never used.
@Test
public void readRetryDefault() {
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    DataLakeFileAsyncClient failureFileAsyncClient = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new MockFailureResponsePolicy(5));
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(failureFileAsyncClient.read()))
        .assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
        .verifyComplete();
}
// readToFile without the overwrite flag must fail with FileAlreadyExistsException
// (wrapped in UncheckedIOException) when the destination file already exists.
//
// FIX: the original called fc.append(...)/fc.flush(...) without block(), so the
// Monos were never subscribed and the upload never ran (the sibling test
// downloadFileExistsSucceeds blocks on both calls).
@Test
public void downloadFileExists() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath()))
        .verifyErrorSatisfies(r -> {
            UncheckedIOException ex = assertInstanceOf(UncheckedIOException.class, r);
            assertInstanceOf(FileAlreadyExistsException.class, ex.getCause());
        });
}
// readToFile with overwrite=true succeeds even when the destination already exists,
// and the file on disk ends up containing the uploaded data.
@Test
public void downloadFileExistsSucceeds() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}

// readToFile creates the destination when it does not exist and writes the data.
@Test
public void downloadFileDoesNotExist() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (testFile.exists()) {
        assertTrue(testFile.delete());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    StepVerifier.create(fc.readToFile(testFile.getPath(), true))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
}
// readToFileWithResponse honors custom OpenOptions (CREATE/READ/WRITE) and the
// downloaded file contains the uploaded data.
// NOTE(review): despite the "DoesNotExist" name, this test creates the destination
// file up front (same setup as downloadFileExistOpenOptions) — confirm intent.
@Test
public void downloadFileDoesNotExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(
        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}

// Same as above but with TRUNCATE_EXISTING added, so an existing destination is
// truncated before the download is written.
@Test
public void downloadFileExistOpenOptions() throws IOException {
    File testFile = new File(prefix + ".txt");
    testFile.deleteOnExit();
    createdFiles.add(testFile);
    if (!testFile.exists()) {
        assertTrue(testFile.createNewFile());
    }
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    fc.flush(DATA.getDefaultDataSizeLong(), true).block();
    Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE));
    StepVerifier.create(fc.readToFileWithResponse(testFile.getPath(), null, null,
        null, null, false, openOptions))
        .assertNext(r -> {
            try {
                assertEquals(DATA.getDefaultText(), new String(Files.readAllBytes(testFile.toPath()), StandardCharsets.UTF_8));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();
}
// NOTE(review): the three annotation lines below appear orphaned/truncated — the
// @EnabledIf condition string is cut off and no test method follows them before the
// supplier; the test body they annotated seems to be missing from this excerpt.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
// File sizes for the download tests: tiny, exactly 16 MB, a non-power-of-two size
// just over 8 MB (1026 looks deliberate to avoid block-size alignment — confirm),
// and 50 MB.
private static Stream<Integer> downloadFileSupplier() {
    return Stream.of(
        20,
        16 * 1024 * 1024,
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    );
}
// Uploads a random file of the given size into a fresh file system, downloads it
// with a 4 MB block size, and verifies both the reported file size and a
// byte-for-byte comparison of the two files.
// NOTE(review): the @EnabledIf condition string below is truncated in this excerpt.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("downloadFileSupplier")
public void downloadFileAsyncBufferCopy(int fileSize) {
    String fileSystemName = generateFileSystemName();
    DataLakeServiceAsyncClient datalakeServiceAsyncClient = new DataLakeServiceClientBuilder()
        .endpoint(ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint())
        .credential(getDataLakeCredential())
        .buildAsyncClient();
    DataLakeFileAsyncClient fileAsyncClient = datalakeServiceAsyncClient.createFileSystem(fileSystemName)
        .blockOptional()
        .orElseThrow(() -> new IllegalStateException("Expected file system to be created."))
        .getFileAsyncClient(generatePathName());
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fileAsyncClient.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fileAsyncClient.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setBlockSizeLong(4L * 1024 * 1024),
        null, null, false, null)
        .map(Response::getValue))
        .assertNext(properties -> assertEquals(fileSize, properties.getFileSize()))
        .verifyComplete();
    compareFiles(file, outFile, 0, fileSize);
}
// Downloads only the requested range to a file and compares that slice against the
// corresponding region of the uploaded source file.
@ParameterizedTest
@MethodSource("downloadFileRangeSupplier")
public void downloadFileRange(FileRange range) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), range, null,
        null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, range.getOffset(), range.getCount()))
        .verifyComplete();
}
// Ranges for downloadFileRange: the full content, the content minus its first byte,
// a 2-byte mid-file slice, the content minus its last byte, and a count that
// exceeds the file size.
private static Stream<FileRange> downloadFileRangeSupplier() {
    long size = DATA.getDefaultDataSizeLong();
    FileRange full = new FileRange(0, size);
    FileRange skipFirst = new FileRange(1, size - 1);
    FileRange midSlice = new FileRange(3, 2L);
    FileRange dropLast = new FileRange(0, size - 1);
    FileRange oversized = new FileRange(0, 10 * 1024L);
    return Stream.of(full, skipFirst, midSlice, dropLast, oversized);
}
// A range starting past the end of the file fails with DataLakeStorageException.
@Test
public void downloadFileRangeFail() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(),
        new FileRange(DATA.getDefaultDataSizeLong() + 1), null, null,
        null, false, null))
        .verifyError(DataLakeStorageException.class);
}

// A range with a null count downloads from the offset to the end of the file.
@Test
public void downloadFileCountNull() {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), new FileRange(0),
        null, null, null, false, null))
        .assertNext(r -> compareFiles(file, outFile, 0, DATA.getDefaultDataSizeLong()))
        .verifyComplete();
}
// readToFile succeeds when all supplied access conditions are satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void downloadFileAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(testResourceNamer.randomName("", 60));
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setLeaseId(setupPathLeaseCondition(fc, leaseID));
    assertDoesNotThrow(() -> fc.readToFileWithResponse(outFile.toPath().toString(), null,
        null, null, bro, false, null).block());
}

// readToFile fails when an access condition is violated; the service reports either
// ConditionNotMet or LeaseIdMismatchWithBlobOperation depending on which one failed.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void downloadFileACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    File file = getRandomFile(DATA.getDefaultDataSize());
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions bro = new DataLakeRequestConditions()
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setLeaseId(leaseID);
    StepVerifier.create(fc.readToFileWithResponse(outFile.toPath().toString(), null, null,
        null, bro, false, null))
        .verifyErrorSatisfies(r -> {
            DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
            assertTrue(Objects.equals(e.getErrorCode(), "ConditionNotMet") || Objects.equals(e.getErrorCode(),
                "LeaseIdMismatchWithBlobOperation"));
        });
}
// ETag-locking test: a pipeline policy overwrites the blob after the first download
// response, so subsequent ranged requests (1 KB blocks) carry a stale ETag and must
// fail with 412 Precondition Failed; the partially written destination file is
// removed on failure.
// NOTE(review): the @EnabledIf condition string below is truncated in this excerpt.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void downloadFileEtagLock() throws IOException {
    File file = getRandomFile(Constants.MB);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    Files.deleteIfExists(outFile.toPath());
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    AtomicInteger counter = new AtomicInteger();
    DataLakeFileAsyncClient facUploading = instrument(new DataLakePathClientBuilder()
        .endpoint(fc.getPathUrl())
        .credential(getDataLakeCredential()))
        .buildFileAsyncClient();
    // After the very first response, re-upload the file so its ETag changes mid-download.
    HttpPipelinePolicy policy = (context, next) -> next.process().flatMap(response -> {
        if (counter.incrementAndGet() == 1) {
            return facUploading.upload(DATA.getDefaultFlux(), null, true).thenReturn(response);
        }
        return Mono.just(response);
    });
    DataLakeFileAsyncClient facDownloading = instrument(new DataLakePathClientBuilder()
        .addPolicy(policy)
        .endpoint(fc.getPathUrl())
        .credential(getDataLakeCredential()))
        .buildFileAsyncClient();
    // Small block size forces multiple ranged requests after the ETag has changed.
    ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong((long) Constants.KB);
    // Parallel downloads may drop errors from losing sub-streams; ignore those.
    Hooks.onErrorDropped(ignored -> { /* do nothing with it */ });
    StepVerifier.create(facDownloading.readToFileWithResponse(outFile.toPath().toString(), null, options,
        null, null, false, null))
        .verifyErrorSatisfies(ex -> {
            // The composite error must contain at least one 412 from the ETag mismatch.
            assertTrue(Exceptions.unwrapMultiple(ex).stream()
                .anyMatch(ex2 -> {
                    Throwable unwrapped = Exceptions.unwrap(ex2);
                    if (unwrapped instanceof DataLakeStorageException) {
                        return ((DataLakeStorageException) unwrapped).getStatusCode() == 412;
                    }
                    return false;
                }));
        });
    // Give async cleanup a moment, then confirm the partial file was deleted.
    sleepIfRunningAgainstService(500);
    assertFalse(outFile.exists());
}
// Progress reporting via the deprecated ProgressReceiver: the final reported value
// equals the file size, no report exceeds it, and reports increase monotonically.
// NOTE(review): the @EnabledIf condition string below is truncated in this excerpt.
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressReceiver(int fileSize) {
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    MockReceiver mockReceiver = new MockReceiver();
    fc.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setProgressReceiver(mockReceiver),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
    // Completion must be reported exactly at fileSize and never beyond it.
    assertTrue(mockReceiver.progresses.stream().anyMatch(progress -> progress == fileSize));
    assertFalse(mockReceiver.progresses.stream().anyMatch(progress -> progress > fileSize));
    long prevCount = -1;
    for (long progress : mockReceiver.progresses) {
        assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
        prevCount = progress;
    }
}
@SuppressWarnings("deprecation")
private static final class MockReceiver implements ProgressReceiver {
    // Every cumulative byte count reported by the transfer, in arrival order.
    final List<Long> progresses = new ArrayList<>();

    @Override
    public void reportProgress(long bytesTransferred) {
        progresses.add(bytesTransferred);
    }
}
// Same progress checks as downloadFileProgressReceiver, but via the non-deprecated
// ProgressListener API.
// NOTE(review): the @EnabledIf condition string below is truncated in this excerpt.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {100, 8 * 1026 * 1024 + 10})
public void downloadFileProgressListener(int fileSize) {
    File file = getRandomFile(fileSize);
    file.deleteOnExit();
    createdFiles.add(file);
    fc.uploadFromFile(file.toPath().toString(), true).block();
    File outFile = new File(prefix);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    MockProgressListener mockListener = new MockProgressListener();
    fc.readToFileWithResponse(outFile.toPath().toString(), null,
        new ParallelTransferOptions().setProgressListener(mockListener),
        new DownloadRetryOptions().setMaxRetryRequests(3), null, false, null).block();
    // Completion must be reported exactly at fileSize and never beyond it.
    assertTrue(mockListener.progresses.stream().anyMatch(progress -> progress == fileSize));
    assertFalse(mockListener.progresses.stream().anyMatch(progress -> progress > fileSize));
    long prevCount = -1;
    for (long progress : mockListener.progresses) {
        assertTrue(progress >= prevCount, "Reported progress should monotonically increase");
        prevCount = progress;
    }
}
private static final class MockProgressListener implements ProgressListener {
    // Every cumulative progress value handed to the listener, in arrival order.
    final List<Long> progresses = new ArrayList<>();

    @Override
    public void handleProgress(long progress) {
        progresses.add(progress);
    }
}
// Renaming within the same file system returns 201 Created.
@Test
public void renameMin() {
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(),
        null, null, null), 201);
}

// After a rename, the returned client resolves the new path (200) and the original
// path no longer exists.
@Test
public void renameWithResponse() {
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming across file systems: the returned client resolves the new path in the
// destination file system (200) and the original path no longer exists.
//
// FIX: JUnit's assertEquals takes (expected, actual); the arguments were reversed,
// which produces a misleading failure message ("expected <actual status>").
@Test
public void renameFilesystemWithResponse() {
    DataLakeFileSystemAsyncClient newFileSystem = primaryDataLakeServiceAsyncClient.createFileSystem(generateFileSystemName()).block();
    StepVerifier.create(fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
        null, null, null)
        .flatMap(r -> r.getValue().getPropertiesWithResponse(null)))
        .assertNext(p -> assertEquals(200, p.getStatusCode()))
        .verifyComplete();
    StepVerifier.create(fc.getProperties())
        .verifyErrorSatisfies(r -> {
            assertInstanceOf(DataLakeStorageException.class, r);
        });
}
// Renaming a non-existent source path errors.
@Test
public void renameError() {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), null,
        null, null))
        .verifyError(DataLakeStorageException.class);
}

// Rename handles URL-encoded characters ("%20%25") in the source and/or destination
// path suffix; the renamed path resolves with 200.
@ParameterizedTest
@CsvSource({",", "%20%25,%20%25", "%20%25,", ",%20%25"})
public void renameUrlEncoded(String source, String destination) {
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName() + source);
    fc.create().block();
    StepVerifier.create(fc.renameWithResponse(null, generatePathName() + destination, null, null, null)
        .flatMap(r -> {
            assertEquals(201, r.getStatusCode());
            return r.getValue().getPropertiesWithResponse(null);
        }))
        .assertNext(piece -> assertEquals(200, piece.getStatusCode()))
        .verifyComplete();
}
// Rename succeeds (201) when the SOURCE access conditions are all satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameSourceAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(fc, leaseID))
        .setIfMatch(setupPathMatchCondition(fc, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, generatePathName(), drc,
        null, null), 201);
}

// Rename fails when a SOURCE access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameSourceACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, generatePathName(), drc,
        null, null))
        .verifyError(DataLakeStorageException.class);
}
// Rename onto an existing destination succeeds (201) when the DESTINATION access
// conditions are all satisfied.
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void renameDestAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(setupPathLeaseCondition(destFile, leaseID))
        .setIfMatch(setupPathMatchCondition(destFile, match))
        .setIfNoneMatch(noneMatch)
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    assertAsyncResponseStatusCode(fc.renameWithResponse(null, pathName, null,
        drc, null), 201);
}

// Rename fails when a DESTINATION access condition is violated.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void renameDestACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    String pathName = generatePathName();
    DataLakeFileAsyncClient destFile = dataLakeFileSystemAsyncClient.createFile(pathName).block();
    setupPathLeaseCondition(destFile, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(destFile, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    StepVerifier.create(fc.renameWithResponse(null, pathName, null, drc, null))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void renameSasToken() {
    // SAS that covers both sides of a rename: read/write/create on the source plus
    // move/create/delete so the destination path can be written and the source removed.
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    // The renamed file must be reachable at its new path.
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // Fixed: expected value goes first in assertEquals; the original had the
        // arguments swapped, which produces misleading failure messages.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void renameSasTokenWithLeadingQuestionMark() {
    // Same as renameSasToken, but the SAS is passed with a leading '?' — the client
    // must tolerate both forms of the query string.
    FileSystemSasPermission permissions = new FileSystemSasPermission()
        .setReadPermission(true)
        .setMovePermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setAddPermission(true)
        .setDeletePermission(true);
    String sas = "?" + dataLakeFileSystemAsyncClient.generateSas(new DataLakeServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions));
    DataLakeFileAsyncClient client = getFileAsyncClient(sas, dataLakeFileSystemAsyncClient.getFileSystemUrl(), fc.getFilePath());
    DataLakeFileAsyncClient destClient = client.rename(dataLakeFileSystemAsyncClient.getFileSystemName(), generatePathName()).block();
    StepVerifier.create(destClient.getPropertiesWithResponse(null))
        // Fixed: expected value goes first in assertEquals; the original had the
        // arguments swapped, which produces misleading failure messages.
        .assertNext(r -> assertEquals(200, r.getStatusCode()))
        .verifyComplete();
}
@Test
public void appendDataMin() {
    // Smoke test: appending the default payload at offset 0 completes without error.
    BinaryData payload = DATA.getDefaultBinaryData();
    assertDoesNotThrow(() -> fc.append(payload, 0).block());
}
@Test
public void appendData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void appendDataMd5() throws NoSuchAlgorithmException {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
byte[] md5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultText().getBytes());
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, md5, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@ParameterizedTest
@MethodSource("appendDataIllegalArgumentsSupplier")
public void appendDataIllegalArguments(Flux<ByteBuffer> is, long dataSize, Class<? extends Throwable> exceptionType) {
StepVerifier.create(fc.append(is, 0, dataSize))
.verifyError(exceptionType);
}
// Arguments: (data flux, declared length, expected exception).
// A null flux fails fast with NPE; a declared length that disagrees with the actual
// stream length (off by +1 / -1) surfaces as UnexpectedLengthException.
private static Stream<Arguments> appendDataIllegalArgumentsSupplier() {
return Stream.of(
Arguments.of(null, DATA.getDefaultDataSizeLong(), NullPointerException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() + 1, UnexpectedLengthException.class),
Arguments.of(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong() - 1, UnexpectedLengthException.class)
);
}
@Test
public void appendDataEmptyBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(BinaryData.fromBytes(new byte[0]), 0))
.verifyError(DataLakeStorageException.class);
}
@Test
public void appendDataNullBody() {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
StepVerifier.create(fc.append(null, 0, 0))
.verifyError(NullPointerException.class);
}
@Test
public void appendDataLease() {
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0,
null, setupPathLeaseCondition(fc, RECEIVED_LEASE_ID)), 202);
}
// Appending with a lease id that does not match the active lease must be rejected
// with 412 (precondition failed).
@Test
public void appendDataLeaseFail() {
// Puts a real lease on the path; the garbage id below then mismatches it.
setupPathLeaseCondition(fc, RECEIVED_LEASE_ID);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null, GARBAGE_LEASE_ID))
.verifyErrorSatisfies(r -> {
DataLakeStorageException e = assertInstanceOf(DataLakeStorageException.class, r);
assertEquals(412, e.getResponse().getStatusCode());
});
}
private static boolean olderThan20200804ServiceVersion() {
return olderThan(DataLakeServiceVersion.V2020_08_04);
}
// Append with LeaseAction.ACQUIRE must take a 15-second fixed lease on the path as a
// side effect of the append itself. Guarded: lease actions require service version
// 2020-08-04 or newer.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquire() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
// The path should now report a locked, leased, fixed-duration lease.
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAutoRenew() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
String leaseId = CoreUtils.randomUuid().toString();
DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
leaseClient.acquireLease(15).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.AUTO_RENEW)
.setLeaseId(leaseId);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.LOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.LEASED, r.getLeaseState());
assertEquals(LeaseDurationType.FIXED, r.getLeaseDuration());
})
.verifyComplete();
}
// NOTE(review): added the service-version guard for consistency — every sibling
// lease-action test (acquire, auto-renew, acquire-release) carries it, and lease
// actions are a 2020-08-04 service feature, so this test would fail against older
// service versions without it.
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseRelease() {
    // Acquire a 15s lease out of band, then have the append RELEASE it; setFlush(true)
    // commits the data so the release takes effect.
    fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
    String leaseId = CoreUtils.randomUuid().toString();
    DataLakeLeaseAsyncClient leaseClient = createLeaseAsyncClient(fc, leaseId);
    leaseClient.acquireLease(15).block();
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseAction(LeaseAction.RELEASE)
        .setLeaseId(leaseId)
        .setFlush(true);
    assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
        202);
    // After the release the path must report an unlocked, available lease.
    StepVerifier.create(fc.getProperties())
        .assertNext(r -> {
            assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
            assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
        })
        .verifyComplete();
}
@DisabledIf("olderThan20200804ServiceVersion")
@Test
public void appendDataLeaseAcquireRelease() {
fc = dataLakeFileSystemAsyncClient.createFileIfNotExists(generatePathName()).block();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseAction(LeaseAction.ACQUIRE_RELEASE)
.setProposedLeaseId(CoreUtils.randomUuid().toString())
.setLeaseDuration(15)
.setFlush(true);
assertAsyncResponseStatusCode(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions),
202);
StepVerifier.create(fc.getProperties())
.assertNext(r -> {
assertEquals(LeaseStatusType.UNLOCKED, r.getLeaseStatus());
assertEquals(LeaseStateType.AVAILABLE, r.getLeaseState());
})
.verifyComplete();
}
@Test
public void appendDataError() {
    // Appending to a path that was never created must fail with 404 (path not found).
    fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
        .verifyErrorSatisfies(error -> {
            DataLakeStorageException storageException =
                assertInstanceOf(DataLakeStorageException.class, error);
            assertEquals(404, storageException.getResponse().getStatusCode());
        });
}
@Test
public void appendDataRetryOnTransientFailure() {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.append(DATA.getDefaultBinaryData(), 0).block();
fc.flush(DATA.getDefaultDataSizeLong(), true).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), r))
.verifyComplete();
}
@Test
public void appendBinaryDataMin() {
assertDoesNotThrow(() -> fc.append(DATA.getDefaultBinaryData(), 0).block());
}
@Test
public void appendBinaryData() {
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, null))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void appendBinaryDataFlush() {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions().setFlush(true);
StepVerifier.create(fc.appendWithResponse(DATA.getDefaultBinaryData(), 0, appendOptions))
.assertNext(r -> {
HttpHeaders headers = r.getHeaders();
assertEquals(202, r.getStatusCode());
assertNotNull(headers.getValue(X_MS_REQUEST_ID));
assertNotNull(headers.getValue(X_MS_VERSION));
assertNotNull(headers.getValue(HttpHeaderName.DATE));
assertTrue(Boolean.parseBoolean(headers.getValue(X_MS_REQUEST_SERVER_ENCRYPTED)));
})
.verifyComplete();
}
@Test
public void flushDataMin() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
}
@Test
public void flushClose() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
true, null, null).block());
}
@Test
public void flushRetainUncommittedData() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flushWithResponse(DATA.getDefaultDataSizeLong(), true,
false, null, null).block());
}
@Test
public void flushIA() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fc.create().block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flushWithResponse(4, false, false, null,
null))
.verifyError(DataLakeStorageException.class);
}
// Flush with explicit HTTP headers (or all nulls) and verify they round-trip through
// getProperties. When no content type is supplied the service reports the default
// "application/octet-stream".
@ParameterizedTest
@CsvSource(value = {"null,null,null,null,null", "control,disposition,encoding,language,type"})
public void flushHeaders(String cacheControl, String contentDisposition, String contentEncoding,
String contentLanguage, String contentType) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
PathHttpHeaders headers = new PathHttpHeaders().setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentType(contentType);
fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false, headers, null).block();
// Service substitutes the default content type when none was set.
contentType = (contentType == null) ? "application/octet-stream" : contentType;
String finalContentType = contentType;
StepVerifier.create(fc.getPropertiesWithResponse(null))
.assertNext(r -> validatePathProperties(r, cacheControl, contentDisposition, contentEncoding, contentLanguage,
null, finalContentType))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void flushAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
fc.append(DATA.getDefaultBinaryData(), 0).block();
DataLakeRequestConditions drc = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
assertAsyncResponseStatusCode(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false,
false, null, drc), 200);
}
// Each parameter set supplies a deliberately unsatisfiable condition (stale lease,
// wrong ETag, impossible time window), so the flush must be rejected.
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void flushACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
    String leaseID) {
    fc = dataLakeFileSystemAsyncClient.createFile(generatePathName()).block();
    fc.append(DATA.getDefaultBinaryData(), 0).block();
    setupPathLeaseCondition(fc, leaseID);
    DataLakeRequestConditions drc = new DataLakeRequestConditions()
        .setLeaseId(leaseID)
        .setIfMatch(match)
        .setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
        .setIfModifiedSince(modified)
        .setIfUnmodifiedSince(unmodified);
    // Consistency fix: use getDefaultDataSizeLong() like every other flush test
    // (the original passed the int-valued getDefaultDataSize()).
    StepVerifier.create(fc.flushWithResponse(DATA.getDefaultDataSizeLong(), false, false,
        null, drc))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void flushError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fc.flush(1, true))
.verifyError(DataLakeStorageException.class);
}
@Test
public void flushDataOverwrite() {
fc.append(DATA.getDefaultBinaryData(), 0).block();
assertDoesNotThrow(() -> fc.flush(DATA.getDefaultDataSizeLong(), true).block());
fc.append(DATA.getDefaultBinaryData(), 0).block();
StepVerifier.create(fc.flush(DATA.getDefaultDataSizeLong(), false))
.verifyError(DataLakeStorageException.class);
}
// Verifies path-name handling in the client builder: percent-encoded names
// (e.g. "%E6%96%91%E9%BB%9E") decode to their Unicode form ("斑點"), and names with
// special characters round-trip unchanged.
@ParameterizedTest
@CsvSource({"file,file", "path/to]a file,path/to]a file", "path%2Fto%5Da%20file,path/to]a file", "斑點,斑點",
"%E6%96%91%E9%BB%9E,斑點"})
public void getFileNameAndBuildClient(String originalFileName, String finalFileName) {
DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(originalFileName);
assertEquals(finalFileName, client.getFilePath());
}
@Test
public void builderBearerTokenValidation() {
String endpoint = BlobUrlParts.parse(fc.getFileUrl()).setScheme("http").toUrl().toString();
assertThrows(IllegalArgumentException.class, () -> new DataLakePathClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint(endpoint)
.buildFileAsyncClient());
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileSupplier")
public void uploadFromFile(int fileSize, Long blockSize) throws IOException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(fileSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fac.uploadFromFile(file.getPath(),
new ParallelTransferOptions().setBlockSizeLong(blockSize), null, null, null))
.verifyComplete();
File outFile = new File(file.getPath() + "result");
assertTrue(outFile.createNewFile());
outFile.deleteOnExit();
createdFiles.add(outFile);
StepVerifier.create(fac.readToFile(outFile.getPath(), true))
.expectNextCount(1)
.verifyComplete();
compareFiles(file, outFile, 0, fileSize);
}
private static Stream<Arguments> uploadFromFileSupplier() {
return Stream.of(
Arguments.of(10, null),
Arguments.of(10 * Constants.KB, null),
Arguments.of(50 * Constants.MB, null),
Arguments.of(101 * Constants.MB, 4L * 1024 * 1024)
);
}
@Test
public void uploadFromFileWithMetadata() throws IOException {
Map<String, String> metadata = Collections.singletonMap("metadata", "value");
File file = getRandomFile(Constants.KB);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.getPath(), null, null, metadata, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(metadata, r.getMetadata()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
.assertNext(r -> {
try {
TestUtils.assertArraysEqual(Files.readAllBytes(file.toPath()), r);
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
}
@Test
public void uploadFromFileDefaultNoOverwrite() {
    // Both targets already exist; uploadFromFile without the overwrite flag must fail
    // for each of them.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    StepVerifier.create(fc.uploadFromFile(file.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
    // Fixed temp-file leak: the second random file was previously created inline and
    // never registered for cleanup.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString()))
        .verifyError(DataLakeStorageException.class);
}
@Test
public void uploadFromFileOverwrite() {
    // Both targets already exist; uploadFromFile with overwrite=true must succeed
    // for each of them.
    DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
        .orElseThrow(() -> new RuntimeException("File was not created"));
    File file = getRandomFile(50);
    file.deleteOnExit();
    createdFiles.add(file);
    assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true).block());
    // Fixed temp-file leak: the second random file was previously created inline and
    // never registered for cleanup.
    File secondFile = getRandomFile(50);
    secondFile.deleteOnExit();
    createdFiles.add(secondFile);
    StepVerifier.create(fac.uploadFromFile(secondFile.toPath().toString(), true))
        .verifyComplete();
}
/*
* Reports the number of bytes sent when uploading a file. This is different from other reporters which track the
* number of reports as upload from file hooks into the loading data from disk data stream which is a hard-coded
* read size.
*/
@SuppressWarnings("deprecation")
private static final class FileUploadReporter implements ProgressReceiver {
// Last cumulative byte count observed; overwritten on every callback, so after the
// upload completes it holds the total number of bytes transferred.
private long reportedByteCount;
@Override
public void reportProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
private static final class FileUploadListener implements ProgressListener {
private long reportedByteCount;
@Override
public void handleProgress(long bytesTransferred) {
this.reportedByteCount = bytesTransferred;
}
long getReportedByteCount() {
return this.reportedByteCount;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadReporter uploadReporter = new FileAsyncApiTests.FileUploadReporter();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadReporter.getReportedByteCount());
}
private static Stream<Arguments> uploadFromFileWithProgressSupplier() {
return Stream.of(
Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 10L * Constants.KB, 100)
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadFromFileWithProgressSupplier")
public void uploadFromFileListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.FileUploadListener uploadListener = new FileAsyncApiTests.FileUploadListener();
File file = getRandomFile(size);
file.deleteOnExit();
createdFiles.add(file);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(blockSize - 1);
StepVerifier.create(fac.uploadFromFile(file.toPath().toString(), parallelTransferOptions, null,
null, null))
.verifyComplete();
assertEquals(size, uploadListener.getReportedByteCount());
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileOptions(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
fc.uploadFromFile(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null).block();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
private static Stream<Arguments> uploadFromFileOptionsSupplier() {
return Stream.of(
Arguments.of(100, 50L, null),
Arguments.of(100, 50L, 20L)
);
}
@ParameterizedTest
@MethodSource("uploadFromFileOptionsSupplier")
public void uploadFromFileWithResponse(int dataSize, long singleUploadSize, Long blockSize) {
File file = getRandomFile(dataSize);
file.deleteOnExit();
createdFiles.add(file);
StepVerifier.create(fc.uploadFromFileWithResponse(file.toPath().toString(),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.assertNext(r -> {
assertEquals(200, r.getStatusCode());
assertNotNull(r.getValue().getETag());
assertNotNull(r.getValue().getLastModified());
})
.verifyComplete();
StepVerifier.create(fc.getProperties())
.assertNext(r -> assertEquals(dataSize, r.getFileSize()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void asyncBufferedUploadEmpty() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.just(ByteBuffer.wrap(new byte[0])), null))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadEmptyBuffersSupplier")
public void asyncBufferedUploadEmptyBuffers(ByteBuffer buffer1, ByteBuffer buffer2, ByteBuffer buffer3,
byte[] expectedDownload) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
StepVerifier.create(fac.upload(Flux.fromIterable(Arrays.asList(buffer1, buffer2, buffer3)),
null, true))
.assertNext(pathInfo -> assertNotNull(pathInfo.getETag()))
.verifyComplete();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fac.read()))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedDownload, bytes))
.verifyComplete();
}
private static Stream<Arguments> asyncBufferedUploadEmptyBuffersSupplier() {
ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
byte[] helloBytes = "Hello".getBytes(StandardCharsets.UTF_8);
byte[] worldBytes = "world!".getBytes(StandardCharsets.UTF_8);
return Stream.of(
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), "Hello world!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), emptyBuffer, "Hello ".getBytes(StandardCharsets.UTF_8)),
Arguments.of(ByteBuffer.wrap(helloBytes), emptyBuffer, ByteBuffer.wrap(worldBytes), "Helloworld!".getBytes(StandardCharsets.UTF_8)),
Arguments.of(emptyBuffer, ByteBuffer.wrap(" ".getBytes(StandardCharsets.UTF_8)), ByteBuffer.wrap(worldBytes), " world!".getBytes(StandardCharsets.UTF_8))
);
}
// Buffered (chunked) upload across a matrix of data sizes, block sizes, and buffer
// counts, then read back and compare. NOTE(review): the @EnabledIf value below appears
// truncated in this copy of the file — confirm the full condition string against the
// repository.
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("asyncBufferedUploadSupplier")
public void asyncBufferedUpload(int dataSize, long bufferSize, int numBuffs) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created"));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
byte[] data = getRandomByteArray(dataSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize)
.setMaxConcurrency(numBuffs)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
facWrite.upload(Flux.just(ByteBuffer.wrap(data)), parallelTransferOptions, true).block();
// Skip the read-back comparison for very large payloads (>= 100 MB) to keep the
// test's memory footprint bounded.
if (dataSize < 100 * 1024 * 1024) {
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(facRead.read(), dataSize))
.assertNext(bytes -> TestUtils.assertArraysEqual(data, bytes))
.verifyComplete();
}
}
private static Stream<Arguments> asyncBufferedUploadSupplier() {
return Stream.of(
Arguments.of(35 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(35 * Constants.MB, 5L * Constants.MB, 5),
Arguments.of(100 * Constants.MB, 10L * Constants.MB, 2),
Arguments.of(100 * Constants.MB, 10L * Constants.MB, 5),
Arguments.of(10 * Constants.MB, (long) Constants.MB, 10),
Arguments.of(50 * Constants.MB, 10L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 2L * Constants.MB, 4),
Arguments.of(10 * Constants.MB, 3L * Constants.MB, 3)
);
}
// Asserts that {@code result} is exactly the concatenation of {@code buffers}: walks
// the result with a sliding position/limit window, comparing each source buffer to its
// slice, then checks no bytes remain.
// NOTE(review): this assumes TestUtils.assertByteBuffersEqual leaves the buffers'
// positions where the subsequent arithmetic expects them — confirm its contract.
private static void compareListToBuffer(List<ByteBuffer> buffers, ByteBuffer result) {
result.position(0);
for (ByteBuffer buffer : buffers) {
buffer.position(0);
// Restrict the result's window to the slice that should match this buffer.
result.limit(result.position() + buffer.remaining());
TestUtils.assertByteBuffersEqual(buffer, result);
result.position(result.position() + buffer.remaining());
}
// Every byte of the result must have been consumed by the comparisons above.
assertEquals(0, result.remaining());
}
@SuppressWarnings("deprecation")
private static final class Reporter implements ProgressReceiver {
private final long blockSize;
private long reportingCount;
Reporter(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void reportProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
private static final class Listener implements ProgressListener {
private final long blockSize;
private long reportingCount;
Listener(long blockSize) {
this.blockSize = blockSize;
}
@Override
public void handleProgress(long bytesTransferred) {
assert bytesTransferred % blockSize == 0;
this.reportingCount += 1;
}
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithReporter(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Reporter uploadReporter = new FileAsyncApiTests.Reporter(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressReceiver(uploadReporter)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadReporter.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadWithProgressSupplier() {
return Stream.of(
Arguments.of(10 * Constants.MB, 10L * Constants.MB, 8),
Arguments.of(20 * Constants.MB, (long) Constants.MB, 5),
Arguments.of(10 * Constants.MB, 5L * Constants.MB, 2),
Arguments.of(10 * Constants.MB, 512L * Constants.KB, 20)
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadWithProgressSupplier")
public void bufferedUploadWithListener(int size, long blockSize, int bufferCount) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
FileAsyncApiTests.Listener uploadListener = new FileAsyncApiTests.Listener(blockSize);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(bufferCount)
.setProgressListener(uploadListener)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(size)), parallelTransferOptions, null,
null, null))
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertTrue(uploadListener.reportingCount >= (size / blockSize));
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadChunkedSourceSupplier")
public void bufferedUploadChunkedSource(List<Integer> dataSizeList, long bufferSize, int numBuffers) {
DataLakeFileAsyncClient facWrite = getPrimaryServiceClientForWrites(bufferSize)
.getFileSystemAsyncClient(dataLakeFileSystemAsyncClient.getFileSystemName())
.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("File was not created."));
DataLakeFileAsyncClient facRead = dataLakeFileSystemAsyncClient.getFileAsyncClient(facWrite.getFileName());
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(bufferSize * Constants.MB)
.setMaxConcurrency(numBuffers)
.setMaxSingleUploadSizeLong(4L * Constants.MB);
List<ByteBuffer> dataList = dataSizeList.stream()
.map(size -> getRandomData(size * Constants.MB))
.collect(Collectors.toList());
Mono<byte[]> uploadOperation = facWrite.upload(Flux.fromIterable(dataList), parallelTransferOptions, true)
.then(FluxUtil.collectBytesInByteBufferStream(facRead.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
private static Stream<Arguments> bufferedUploadChunkedSourceSupplier() {
return Stream.of(
Arguments.of(Arrays.asList(7, 7), 10L, 2),
Arguments.of(Arrays.asList(3, 3, 3, 3, 3, 3, 3), 10L, 2),
Arguments.of(Arrays.asList(10, 10), 10L, 2),
Arguments.of(Arrays.asList(50, 51, 49), 10L, 2)
);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathing(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingSupplier")
public void bufferedUploadHandlePathingHotFlux(List<Integer> dataSizeList) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
Mono<byte[]> uploadOperation = fac.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fac.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/** Chunk-size lists (bytes) around the 4 MB single-upload threshold. */
private static Stream<List<Integer>> bufferedUploadHandlePathingSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),           // well under the threshold
        Arrays.asList(4 * Constants.MB + 1, 10),       // just over the threshold
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB),
        Collections.singletonList(4 * Constants.MB)    // exactly at the threshold
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier")
public void bufferedUploadHandlePathingHotFluxWithTransientFailure(List<Integer> dataSizeList) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
List<ByteBuffer> dataList = dataSizeList.stream().map(this::getRandomData).collect(Collectors.toList());
DataLakeFileAsyncClient fcAsync = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
Mono<byte[]> uploadOperation = clientWithFailure.upload(Flux.fromIterable(dataList).publish().autoConnect(),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB), true)
.then(FluxUtil.collectBytesInByteBufferStream(fcAsync.read()));
StepVerifier.create(uploadOperation)
.assertNext(bytes -> compareListToBuffer(dataList, ByteBuffer.wrap(bytes)))
.verifyComplete();
}
/** Chunk-size lists for the hot-Flux transient-failure test. */
private static Stream<List<Integer>> bufferedUploadHandlePathingHotFluxWithTransientFailureSupplier() {
    return Arrays.asList(
        Arrays.asList(10, 100, 1000, 10000),
        Arrays.asList(4 * Constants.MB + 1, 10),
        Arrays.asList(4 * Constants.MB, 4 * Constants.MB)
    ).stream();
}
@SuppressWarnings("deprecation")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@ValueSource(ints = {11110, 2 * Constants.MB + 11})
public void bufferedUploadAsyncHandlePathingWithTransientFailure(int dataSize) {
DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
new TransientFailureInjectingHttpPipelinePolicy());
byte[] data = getRandomByteArray(dataSize);
clientWithFailure.uploadWithResponse(new FileParallelUploadOptions(new ByteArrayInputStream(data), dataSize)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxSingleUploadSizeLong(2L * Constants.MB)
.setBlockSizeLong(2L * Constants.MB))).block();
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
TestUtils.assertArraysEqual(data, readArray);
}
@Test
public void bufferedUploadIllegalArgumentsNull() {
    // A null data Flux must surface a NullPointerException through the pipeline.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.createFile(generatePathName())
        .blockOptional()
        .orElseThrow(() -> new RuntimeException("Cannot create file."));
    ParallelTransferOptions options = new ParallelTransferOptions().setBlockSizeLong(4L).setMaxConcurrency(4);
    StepVerifier.create(client.upload((Flux<ByteBuffer>) null, options, true))
        .verifyError(NullPointerException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("bufferedUploadHeadersSupplier")
public void bufferedUploadHeaders(int dataSize, String cacheControl, String contentDisposition,
String contentEncoding, String contentLanguage, boolean validateContentMD5, String contentType)
throws NoSuchAlgorithmException {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
byte[] randomData = getRandomByteArray(dataSize);
byte[] contentMD5 = validateContentMD5 ? MessageDigest.getInstance("MD5").digest(randomData) : null;
Mono<Response<PathProperties>> uploadOperation = fac
.uploadWithResponse(Flux.just(ByteBuffer.wrap(randomData)),
new ParallelTransferOptions().setMaxSingleUploadSizeLong(4L * Constants.MB),
new PathHttpHeaders()
.setCacheControl(cacheControl)
.setContentDisposition(contentDisposition)
.setContentEncoding(contentEncoding)
.setContentLanguage(contentLanguage)
.setContentMd5(contentMD5)
.setContentType(contentType), null, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> validatePathProperties(response, cacheControl, contentDisposition, contentEncoding,
contentLanguage, contentMD5, contentType == null ? "application/octet-stream" : contentType))
.verifyComplete();
}
/** Header combinations for bufferedUploadHeaders: small and multi-block sizes, with and without headers. */
private static Stream<Arguments> bufferedUploadHeadersSupplier() {
    return Arrays.asList(
        Arguments.of(DATA.getDefaultDataSize(), null, null, null, null, true, null),
        Arguments.of(DATA.getDefaultDataSize(), "control", "disposition", "encoding", "language", true, "type"),
        Arguments.of(6 * Constants.MB, null, null, null, null, false, null),
        Arguments.of(6 * Constants.MB, "control", "disposition", "encoding", "language", true, "type")
    ).stream();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz"}, nullValues = "null")
public void bufferedUploadMetadata(String key1, String value1, String key2, String value2) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
Map<String, String> metadata = new HashMap<>();
if (key1 != null) {
metadata.put(key1, value1);
}
if (key2 != null) {
metadata.put(key2, value2);
}
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L)
.setMaxConcurrency(10);
Mono<Response<PathProperties>> uploadOperation = fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, metadata, null)
.then(fac.getPropertiesWithResponse(null));
StepVerifier.create(uploadOperation)
.assertNext(response -> {
assertEquals(200, response.getStatusCode());
assertEquals(metadata, response.getValue().getMetadata());
})
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("uploadNumberOfAppendsSupplier")
public void bufferedUploadOptions(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
AtomicInteger appendCount = new AtomicInteger(0);
DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
@Override
Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Context context) {
appendCount.incrementAndGet();
return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
}
};
StepVerifier.create(spyClient.uploadWithResponse(Flux.just(getRandomData(dataSize)),
new ParallelTransferOptions().setBlockSizeLong(blockSize).setMaxSingleUploadSizeLong(singleUploadSize),
null, null, null))
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(fac.getProperties())
.assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
.verifyComplete();
assertEquals(numAppends, appendCount.get());
}
@Test
public void bufferedUploadPermissionsAndUmask() {
    // POSIX permissions and umask supplied at upload time should be accepted by the service.
    DataLakeFileAsyncClient client = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    FileParallelUploadOptions options = new FileParallelUploadOptions(Flux.just(getRandomData(10)))
        .setPermissions("0777")
        .setUmask("0057");
    Mono<Response<PathProperties>> operation = client.uploadWithResponse(options)
        .then(client.getPropertiesWithResponse(null));
    StepVerifier.create(operation)
        .assertNext(response -> {
            assertEquals(200, response.getStatusCode());
            assertEquals(10, response.getValue().getFileSize());
        })
        .verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void bufferedUploadAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(setupPathMatchCondition(fac, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void bufferedUploadACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fac, leaseID))
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fac, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(10L);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyErrorSatisfies(ex -> {
DataLakeStorageException exception = assertInstanceOf(DataLakeStorageException.class, ex);
assertEquals(412, exception.getStatusCode());
});
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@ParameterizedTest
@CsvSource({"7,2", "5,2"})
public void uploadBufferPoolLockThreeOrMoreBuffers(long blockSize, int numBuffers) {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.createFile(generatePathName()).blockOptional()
.orElseThrow(() -> new RuntimeException("Could not create file"));
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().
setLeaseId(setupPathLeaseCondition(fac, GARBAGE_LEASE_ID));
ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
.setMaxConcurrency(numBuffers);
StepVerifier.create(fac.uploadWithResponse(Flux.just(getRandomData(10)),
parallelTransferOptions, null, null, requestConditions))
.verifyError(DataLakeStorageException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadDefaultNoOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
fac.upload(DATA.getDefaultFlux(), null).block();
StepVerifier.create(fac.upload(DATA.getDefaultFlux(), null))
.verifyError(IllegalArgumentException.class);
}
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void bufferedUploadOverwrite() {
DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
File file = getRandomFile(50);
file.deleteOnExit();
createdFiles.add(file);
assertDoesNotThrow(() -> fc.uploadFromFile(file.toPath().toString(), true));
StepVerifier.create(fac.uploadFromFile(getRandomFile(50).toPath().toString(), true))
.verifyComplete();
}
@Test
public void bufferedUploadNonMarkableStream() throws IOException {
    // A file-backed Flux (non-replayable without buffering) must upload and
    // round-trip correctly.
    File file = getRandomFile(10);
    file.deleteOnExit();
    createdFiles.add(file);
    File outFile = getRandomFile(10);
    outFile.deleteOnExit();
    createdFiles.add(outFile);
    // Fix: close the AsynchronousFileChannel (it was previously leaked). The
    // upload is fully drained by block() before the try block exits.
    try (AsynchronousFileChannel channel = AsynchronousFileChannel.open(file.toPath())) {
        Flux<ByteBuffer> stream = FluxUtil.readFile(channel, 0, file.length());
        fc.upload(stream, null, true).block();
    }
    fc.readToFile(outFile.toPath().toString(), true).block();
    compareFiles(file, outFile, 0, file.length());
}
@Test
public void uploadInputStreamNoLength() {
    // Upload without an explicit length: the client must buffer and size the stream itself.
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream());
    assertDoesNotThrow(() -> fc.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@SuppressWarnings("deprecation")
@ParameterizedTest
@MethodSource("uploadInputStreamBadLengthSupplier")
public void uploadInputStreamBadLength(long length) {
    // A declared length that disagrees with the actual stream size must fail the upload.
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream(), length);
    assertThrows(Exception.class, () -> fc.uploadWithResponse(options).block());
}
/** Bad lengths: zero, negative, one byte short, and one byte long. */
private static Stream<Long> uploadInputStreamBadLengthSupplier() {
    return Arrays.asList(
        0L, -100L, DATA.getDefaultDataSizeLong() - 1, DATA.getDefaultDataSizeLong() + 1
    ).stream();
}
@Test
public void uploadSuccessfulRetry() {
    // A pipeline policy injects one transient failure; the upload must retry and succeed.
    DataLakeFileAsyncClient clientWithFailure = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl(),
        new TransientFailureInjectingHttpPipelinePolicy());
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultInputStream());
    assertDoesNotThrow(() -> clientWithFailure.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@Test
public void uploadBinaryData() {
    // BinaryData-based upload must round-trip the default payload.
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    FileParallelUploadOptions options = new FileParallelUploadOptions(DATA.getDefaultBinaryData());
    assertDoesNotThrow(() -> client.uploadWithResponse(options).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@Test
public void uploadBinaryDataOverwrite() {
    // BinaryData upload with overwrite=true onto an existing file must succeed and round-trip.
    DataLakeFileAsyncClient client = getFileAsyncClient(getDataLakeCredential(), fc.getFileUrl());
    assertDoesNotThrow(() -> client.upload(DATA.getDefaultBinaryData(), null, true).block());
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(fc.read()))
        .assertNext(bytes -> TestUtils.assertArraysEqual(DATA.getDefaultBytes(), bytes))
        .verifyComplete();
}
@DisabledIf("olderThan20210410ServiceVersion")
@Test
public void uploadEncryptionContext() {
    // An encryption context supplied at upload must round-trip through getProperties.
    String encryptionContext = "encryptionContext";
    fc.uploadWithResponse(new FileParallelUploadOptions(DATA.getDefaultInputStream())
        .setEncryptionContext(encryptionContext)).block();
    StepVerifier.create(fc.getProperties())
        .assertNext(properties -> assertEquals(encryptionContext, properties.getEncryptionContext()))
        .verifyComplete();
}
/* Quick Query Tests. */
/**
 * Seeds {@code fc} with {@code numCopies} copies of a small two-row CSV body,
 * optionally preceded by a header row, using the serialization's separators.
 */
private void uploadCsv(FileQueryDelimitedSerialization s, int numCopies) {
    String col = Character.toString(s.getColumnSeparator());
    byte[] headers = ("rn1" + col + "rn2" + col + "rn3" + col + "rn4"
        + s.getRecordSeparator()).getBytes();
    byte[] csvData = ("100" + col + "200" + col + "300" + col + "400"
        + s.getRecordSeparator() + "300" + col + "400" + col + "500" + col
        + "600" + s.getRecordSeparator()).getBytes();
    int headerLength = s.isHeadersPresent() ? headers.length : 0;
    byte[] data = new byte[headerLength + csvData.length * numCopies];
    if (s.isHeadersPresent()) {
        System.arraycopy(headers, 0, data, 0, headers.length);
    }
    for (int copy = 0; copy < numCopies; copy++) {
        System.arraycopy(csvData, 0, data, headerLength + copy * csvData.length, csvData.length);
    }
    fc.create(true).block();
    fc.append(BinaryData.fromBytes(data), 0).block();
    fc.flush(data.length, true).block();
}
/** Seeds {@code fc} with a small JSON object containing {@code numCopies} name/owner pairs. */
private void uploadSmallJson(int numCopies) {
    StringBuilder json = new StringBuilder("{\n");
    for (int i = 0; i < numCopies; i++) {
        json.append(String.format("\t\"name%d\": \"owner%d\",\n", i, i));
    }
    json.append('}');
    fc.create(true).block();
    fc.append(BinaryData.fromString(json.toString()), 0).block();
    fc.flush(json.length(), true).block();
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@ValueSource(ints = {
    1,
    32,
    256,
    400,
    4000
})
public void queryMin(int numCopies) {
    // "SELECT *" over CSV data of increasing size must produce bytes identical
    // to a plain read of the file.
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\0')
        .setFieldQuote('\0')
        .setHeadersPresent(false);
    uploadCsv(ser, numCopies);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = fc.query(expression)
            .reduce(new ByteArrayOutputStream(), (outputStream, piece) -> {
                // Fix: copy only the readable region. ByteBuffer.array() returns the
                // whole backing array and ignores position/limit, which could append
                // bytes outside the buffer's readable window. This also removes the
                // IOException catch: the bounded write(byte[], int, int) overload of
                // ByteArrayOutputStream does not throw.
                byte[] chunk = new byte[piece.remaining()];
                piece.duplicate().get(chunk);
                outputStream.write(chunk, 0, chunk.length);
                return outputStream;
            }).block();
        byte[] queryArray = queryData.toByteArray();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Round-trips "SELECT *" through delimited input/output serializations using a
// variety of record/column separators and header flags. When the input has a
// header row but the output does not, the header is stripped from the result;
// the fixed header row produced by uploadCsv is 16 bytes, which the assertion
// below accounts for.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryCsvSerializationSeparatorSupplier")
public void queryCsvSerializationSeparator(char recordSeparator, char columnSeparator, boolean headersPresentIn,
boolean headersPresentOut) {
FileQueryDelimitedSerialization serIn = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentIn)
FileQueryDelimitedSerialization serOut = new FileQueryDelimitedSerialization()
.setRecordSeparator(recordSeparator)
.setColumnSeparator(columnSeparator)
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(headersPresentOut);
uploadCsv(serIn, 32);
String expression = "SELECT * from BlobStorage";
byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
byte[] queryArray = fc.queryWithResponse(new FileQueryOptions(expression, queryData)
.setInputSerialization(serIn).setOutputSerialization(serOut))
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
if (headersPresentIn && !headersPresentOut) {
// Query output omits the header row that the raw read still contains.
assertEquals(readArray.length - 16, queryArray.length);
/* Account for 16 bytes of header. */
TestUtils.assertArraysEqual(readArray, 16, queryArray, 0, readArray.length - 16);
} else {
TestUtils.assertArraysEqual(readArray, queryArray);
}
});
}
/**
 * Record/column separator and header-flag combinations for
 * {@code queryCsvSerializationSeparator}.
 */
private static Stream<Arguments> queryCsvSerializationSeparatorSupplier() {
    return Arrays.asList(
        Arguments.of('\n', ',', false, false),
        Arguments.of('\n', ',', true, true),
        Arguments.of('\n', ',', true, false),   // header stripped from output
        Arguments.of('\t', ',', false, false),
        Arguments.of('\r', ',', false, false),
        Arguments.of('<', ',', false, false),
        Arguments.of('>', ',', false, false),
        Arguments.of('&', ',', false, false),
        Arguments.of('\\', ',', false, false),
        Arguments.of(',', '.', false, false),
        Arguments.of(',', ';', false, false),
        Arguments.of('\n', '\t', false, false),
        Arguments.of('\n', '<', false, false),
        Arguments.of('\n', '>', false, false),
        Arguments.of('\n', '&', false, false),
        Arguments.of('\n', '\\', false, false)
    ).stream();
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryCsvSerializationEscapeAndFieldQuote() {
    // "SELECT *" with escape char and field quote configured must still return
    // bytes identical to a plain read.
    FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
        .setRecordSeparator('\n')
        .setColumnSeparator(',')
        .setEscapeChar('\\') /* Escape set here. */
        .setFieldQuote('"') /* Field quote set here*/
        .setHeadersPresent(false);
    uploadCsv(ser, 32);
    String expression = "SELECT * from BlobStorage";
    byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
    liveTestScenarioWithRetry(() -> {
        ByteArrayOutputStream queryData = new ByteArrayOutputStream();
        FileQueryOptions options = new FileQueryOptions(expression, queryData)
            .setInputSerialization(ser)
            .setOutputSerialization(ser);
        byte[] queryArray = fc.queryWithResponse(options)
            .flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue()))
            .block();
        TestUtils.assertArraysEqual(readArray, queryArray);
    });
}
// Round-trips "SELECT *" over JSON input of varying sizes. The raw read is
// extended with a single line feed (0x0A) before comparison — presumably to
// match a trailing record separator in the query output; confirm against the
// service's JSON output framing.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("queryInputJsonSupplier")
public void queryInputJson(int numCopies, char recordSeparator) {
FileQueryJsonSerialization ser = new FileQueryJsonSerialization()
.setRecordSeparator(recordSeparator);
uploadSmallJson(numCopies);
String expression = "SELECT * from BlobStorage";
ByteArrayOutputStream readData = new ByteArrayOutputStream();
FluxUtil.writeToOutputStream(fc.read(), readData).block();
readData.write(10); // append '\n' (0x0A) to the raw bytes
byte[] readArray = readData.toByteArray();
liveTestScenarioWithRetry(() -> {
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData)
.setInputSerialization(ser).setOutputSerialization(ser);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(readArray, queryArray);
});
}
/** Copy counts (0 to 1000 records) with a newline record separator. */
private static Stream<Arguments> queryInputJsonSupplier() {
    return Arrays.asList(
        Arguments.of(0, '\n'),
        Arguments.of(10, '\n'),
        Arguments.of(100, '\n'),
        Arguments.of(1000, '\n')
    ).stream();
}
// Queries CSV input with JSON output serialization and checks the result
// starts with the expected JSON object for the first CSV row (columns are
// surfaced as "_1".."_4").
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputJson() {
liveTestScenarioWithRetry(() -> {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 1);
FileQueryJsonSerialization outSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "{\"_1\":\"100\",\"_2\":\"200\",\"_3\":\"300\",\"_4\":\"400\"}".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
// Only the prefix is compared; the second CSV row follows in the output.
TestUtils.assertArraysEqual(expectedData, 0, queryArray, 0, expectedData.length);
});
}
// Queries JSON input with delimited (CSV) output serialization; two name/owner
// pairs from uploadSmallJson should flatten to one comma-separated CSV row.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputJsonOutputCsv() {
liveTestScenarioWithRetry(() -> {
FileQueryJsonSerialization inSer = new FileQueryJsonSerialization().setRecordSeparator('\n');
uploadSmallJson(2);
FileQueryDelimitedSerialization outSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
byte[] expectedData = "owner0,owner1\n".getBytes();
ByteArrayOutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions optionsOs = new FileQueryOptions(expression, queryData).setInputSerialization(inSer)
.setOutputSerialization(outSer);
byte[] queryArray = fc.queryWithResponse(optionsOs)
.flatMap(piece -> FluxUtil.collectBytesInByteBufferStream(piece.getValue())).block();
TestUtils.assertArraysEqual(expectedData, queryArray);
});
}
// Queries CSV input with Arrow output serialization. Only verifies the query
// executes without throwing — the Arrow payload itself is not inspected.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryInputCsvOutputArrow() {
FileQueryDelimitedSerialization inSer = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(inSer, 32);
// Single-field Arrow schema the projected column is decoded into.
List<FileQueryArrowField> schema = Collections.singletonList(
new FileQueryArrowField(FileQueryArrowFieldType.DECIMAL).setName("Name").setPrecision(4).setScale(2));
FileQueryArrowSerialization outSer = new FileQueryArrowSerialization().setSchema(schema);
String expression = "SELECT _2 from BlobStorage WHERE _1 > 250;";
liveTestScenarioWithRetry(() -> {
OutputStream queryData = new ByteArrayOutputStream();
FileQueryOptions options = new FileQueryOptions(expression, queryData).setOutputSerialization(outSer);
assertDoesNotThrow(() -> fc.queryWithResponse(options).block());
});
}
// The data is uploaded with '.' column separators but queried with ',', so the
// service reports non-fatal per-record errors (InvalidColumnOrdinal) that are
// routed to the error consumer instead of failing the query.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryNonFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT _1 from BlobStorage WHERE _2 > 250";
liveTestScenarioWithRetry(() -> {
MockErrorReceiver receiver2 = new FileAsyncApiTests.MockErrorReceiver("InvalidColumnOrdinal");
// blockLast() drains the response body so every error reaches the consumer.
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(base.setColumnSeparator(','))
.setOutputSerialization(base.setColumnSeparator(','))
.setErrorConsumer(receiver2)).block().getValue().blockLast());
assertTrue(receiver2.numErrors > 0);
});
}
// CSV data (with headers) parsed with a JSON input serialization is a fatal
// mismatch: the query response itself is returned, but consuming the value
// stream must throw.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryFatalError() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(true);
uploadCsv(base.setColumnSeparator('.'), 32);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.queryWithResponse(new FileQueryOptions(expression,
new ByteArrayOutputStream()).setInputSerialization(new FileQueryJsonSerialization())))
.assertNext(r -> {
// The failure surfaces only when the body is drained.
assertThrows(RuntimeException.class, () -> r.getValue().blockLast());
})
.verifyComplete();
});
}
// The progress consumer must eventually report the full file size once the
// query has scanned the entire blob.
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryProgressReceiver() {
FileQueryDelimitedSerialization base = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
uploadCsv(base.setColumnSeparator('.'), 32);
long sizeofBlobToRead = fc.getProperties().block().getFileSize();
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
// Drain the body so all progress callbacks fire before asserting.
fc.queryWithResponse(options).block().getValue().blockLast();
assertTrue(mockReceiver.progressList.contains(sizeofBlobToRead));
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
@Test
public void queryMultipleRecordsWithProgressReceiver() {
FileQueryDelimitedSerialization ser = new FileQueryDelimitedSerialization()
.setRecordSeparator('\n')
.setColumnSeparator(',')
.setEscapeChar('\0')
.setFieldQuote('\0')
.setHeadersPresent(false);
String expression = "SELECT * from BlobStorage";
uploadCsv(ser, 512000);
liveTestScenarioWithRetry(() -> {
MockProgressReceiver mockReceiver = new com.azure.storage.file.datalake.FileAsyncApiTests.MockProgressReceiver();
long temp = 0;
FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream()).setProgressConsumer(mockReceiver);
fc.queryWithResponse(options).block().getValue().blockLast();
for (long progress : mockReceiver.progressList) {
assertTrue(progress >= temp, "Expected progress to be greater than or equal to previous progress.");
temp = progress;
}
});
}
// An unrecognized FileQuerySerialization subclass (RandomOtherSerialization,
// defined elsewhere in this file) must be rejected with
// IllegalArgumentException whether used for input or output.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@CsvSource(value = {"true,false", "false,true"})
public void queryInputOutputIA(boolean input, boolean output) {
/* Mock random impl of QQ Serialization*/
FileQuerySerialization ser = new RandomOtherSerialization();
FileQuerySerialization inSer = input ? ser : null;
FileQuerySerialization outSer = output ? ser : null;
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertThrows(IllegalArgumentException.class, () -> fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream())
.setInputSerialization(inSer)
.setOutputSerialization(outSer)).block());
});
}
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryArrowInputIA() {
    // Arrow serialization is output-only; using it for input must be rejected.
    FileQueryArrowSerialization inSer = new FileQueryArrowSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setInputSerialization(inSer);
        StepVerifier.create(fc.queryWithResponse(options))
            .verifyError(IllegalArgumentException.class);
    });
}
/** True when the targeted service version predates 2020-10-02. */
private static boolean olderThan20201002ServiceVersion() {
    DataLakeServiceVersion cutoff = DataLakeServiceVersion.V2020_10_02;
    return olderThan(cutoff);
}
@DisabledIf("olderThan20201002ServiceVersion")
@Test
public void queryParquetOutputIA() {
    // Parquet serialization is input-only; using it for output must be rejected.
    FileQueryParquetSerialization outSer = new FileQueryParquetSerialization();
    String expression = "SELECT * from BlobStorage";
    liveTestScenarioWithRetry(() -> {
        FileQueryOptions options = new FileQueryOptions(expression, new ByteArrayOutputStream())
            .setOutputSerialization(outSer);
        StepVerifier.create(fc.queryWithResponse(options))
            .verifyError(IllegalArgumentException.class);
    });
}
// Querying a path that was never created must fail with DataLakeStorageException.
// NOTE(review): this reassigns the shared `fc` field to a fresh, non-existent
// path; later tests in the same instance see the new value — confirm the test
// lifecycle recreates `fc` per test.
@SuppressWarnings("resource")
@DisabledIf("olderThan20191212ServiceVersion")
@Test
public void queryError() {
fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
liveTestScenarioWithRetry(() -> {
StepVerifier.create(fc.query("SELECT * from BlobStorage"))
.verifyError(DataLakeStorageException.class);
});
}
// A query issued with satisfied access conditions (lease / ETag / modification
// time) must succeed without throwing.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("modifiedMatchAndLeaseIdSupplier")
public void queryAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(setupPathLeaseCondition(fc, leaseID))
.setIfMatch(setupPathMatchCondition(fc, match))
.setIfNoneMatch(noneMatch)
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
liveTestScenarioWithRetry(() -> {
assertDoesNotThrow(() -> fc.queryWithResponse(new FileQueryOptions(expression, new ByteArrayOutputStream())
.setRequestConditions(bac)).block());
});
}
/**
 * Runs {@code runnable}, retrying up to 5 times (with a pause between attempts)
 * in live mode to smooth over transient service failures. In playback/record
 * mode it runs exactly once with no retry.
 * <p>
 * Fix: previously, when all 5 attempts failed the final exception was swallowed
 * and the test passed silently; the last failure is now rethrown.
 */
private void liveTestScenarioWithRetry(Runnable runnable) {
    if (!interceptorManager.isLiveMode()) {
        runnable.run();
        return;
    }
    RuntimeException lastFailure = null;
    for (int retry = 0; retry < 5; retry++) {
        try {
            runnable.run();
            return;
        } catch (RuntimeException ex) {
            lastFailure = ex;
            sleepIfRunningAgainstService(5000);
        }
    }
    throw lastFailure;
}
// A query issued with failing access conditions must be rejected with a
// DataLakeStorageException.
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("invalidModifiedMatchAndLeaseIdSupplier")
public void queryACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch,
String leaseID) {
setupPathLeaseCondition(fc, leaseID);
DataLakeRequestConditions bac = new DataLakeRequestConditions()
.setLeaseId(leaseID)
.setIfMatch(match)
.setIfNoneMatch(setupPathMatchCondition(fc, noneMatch))
.setIfModifiedSince(modified)
.setIfUnmodifiedSince(unmodified);
String expression = "SELECT * from BlobStorage";
StepVerifier.create(fc.queryWithResponse(
new FileQueryOptions(expression, new ByteArrayOutputStream()).setRequestConditions(bac)))
.verifyError(DataLakeStorageException.class);
}
@DisabledIf("olderThan20191212ServiceVersion")
@ParameterizedTest
@MethodSource("scheduleDeletionSupplier")
public void scheduleDeletion(FileScheduleDeletionOptions fileScheduleDeletionOptions, boolean hasExpiry) {
    // Scheduling deletion should set (or leave unset) the file's expiry accordingly.
    DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
    fileAsyncClient.create().block();
    fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
    boolean expirySet = fileAsyncClient.getProperties().block().getExpiresOn() != null;
    assertEquals(hasExpiry, expirySet);
}
private static Stream<Arguments> scheduleDeletionSupplier() {
return Stream.of(
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.CREATION_TIME), true),
Arguments.of(new FileScheduleDeletionOptions(Duration.ofDays(1), FileExpirationOffset.NOW), true),
Arguments.of(new FileScheduleDeletionOptions(), false),
Arguments.of(null, false)
);
}
    // Guard used by the @DisabledIf annotations in this file: true when the targeted service
    // version predates 2019-12-12 (presumably the version introducing query/schedule-deletion —
    // confirm against service documentation).
    private static boolean olderThan20191212ServiceVersion() {
        return olderThan(DataLakeServiceVersion.V2019_12_12);
    }
    // Schedules deletion at an absolute time and verifies the reported expiry matches,
    // truncated to seconds (the service's expiry granularity as asserted here).
    @DisabledIf("olderThan20191212ServiceVersion")
    @Test
    public void scheduleDeletionTime() {
        OffsetDateTime now = testResourceNamer.now();
        FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(now.plusDays(1));
        DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        fileAsyncClient.create().block();
        fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions).block();
        assertEquals(now.plusDays(1).truncatedTo(ChronoUnit.SECONDS), fileAsyncClient.getProperties().block().getExpiresOn());
    }
    // Scheduling deletion on a file that was never created must fail with
    // DataLakeStorageException.
    @Test
    public void scheduleDeletionError() {
        FileScheduleDeletionOptions fileScheduleDeletionOptions = new FileScheduleDeletionOptions(testResourceNamer.now().plusDays(1));
        DataLakeFileAsyncClient fileAsyncClient = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        StepVerifier.create(fileAsyncClient.scheduleDeletionWithResponse(fileScheduleDeletionOptions))
            .verifyError(DataLakeStorageException.class);
    }
    // Test helper: records each bytes-scanned checkpoint reported while a query runs.
    static class MockProgressReceiver implements Consumer<FileQueryProgress> {
        List<Long> progressList = new ArrayList<>();
        @Override
        public void accept(FileQueryProgress progress) {
            progressList.add(progress.getBytesScanned());
        }
    }
    // Test helper: counts non-fatal query errors, asserting each one is of the expected type.
    static class MockErrorReceiver implements Consumer<FileQueryError> {
        String expectedType;
        int numErrors;
        MockErrorReceiver(String expectedType) {
            this.expectedType = expectedType;
            this.numErrors = 0;
        }
        @Override
        public void accept(FileQueryError error) {
            // Only recoverable errors are expected to reach this receiver.
            assertFalse(error.isFatal());
            assertEquals(expectedType, error.getName());
            numErrors++;
        }
    }
    // A FileQuerySerialization implementation the client does not recognize; used to exercise
    // rejection of unsupported serialization types.
    private static final class RandomOtherSerialization implements FileQuerySerialization {
    }
    // upload without the overwrite flag must fail (the file already exists per test setup).
    @Test
    public void uploadInputStreamOverwriteFails() {
        StepVerifier.create(fc.upload(DATA.getDefaultBinaryData(), null))
            .verifyError(IllegalArgumentException.class);
    }
    // upload with overwrite=true must replace the content; verify by reading it back.
    @Test
    public void uploadInputStreamOverwrite() {
        fc.upload(DATA.getDefaultBinaryData(), null, true).block();
        byte[] readArray = FluxUtil.collectBytesInByteBufferStream(fc.read()).block();
        TestUtils.assertArraysEqual(DATA.getDefaultBytes(), readArray);
    }
    // Uploads 20 MB with a 1 MB single-shot threshold, forcing the chunked upload path.
    // NOTE(review): the @EnabledIf condition string below is truncated in this copy of the file —
    // confirm the full annotation against the upstream source.
    @SuppressWarnings("deprecation")
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @Test
    public void uploadInputStreamLargeData() {
        ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(20 * Constants.MB));
        ParallelTransferOptions pto = new ParallelTransferOptions().setMaxSingleUploadSizeLong((long) Constants.MB);
        assertDoesNotThrow(() -> fc.uploadWithResponse(new FileParallelUploadOptions(input, 20 * Constants.MB)
            .setParallelTransferOptions(pto)).block());
    }
    // Verifies the upload path issues exactly the expected number of append calls for a given
    // data size / single-upload threshold / block size, by counting appendWithResponse calls on
    // an overriding subclass.
    // NOTE(review): the @EnabledIf condition string below is truncated in this copy of the file —
    // confirm the full annotation against the upstream source.
    @SuppressWarnings("deprecation")
    @EnabledIf("com.azure.storage.file.datalake.DataLakeTestBase
    @ParameterizedTest
    @MethodSource("uploadNumberOfAppendsSupplier")
    public void uploadNumAppends(int dataSize, Long singleUploadSize, Long blockSize, int numAppends) {
        DataLakeFileAsyncClient fac = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
        AtomicInteger numAppendsCounter = new AtomicInteger(0);
        // Intercept appendWithResponse to count invocations while delegating to the real client.
        DataLakeFileAsyncClient spyClient = new DataLakeFileAsyncClient(fac) {
            @Override
            Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length,
                DataLakeFileAppendOptions appendOptions, Context context) {
                numAppendsCounter.incrementAndGet();
                return super.appendWithResponse(data, fileOffset, length, appendOptions, context);
            }
        };
        ByteArrayInputStream input = new ByteArrayInputStream(getRandomByteArray(dataSize));
        ParallelTransferOptions pto = new ParallelTransferOptions().setBlockSizeLong(blockSize)
            .setMaxSingleUploadSizeLong(singleUploadSize);
        StepVerifier.create(spyClient.uploadWithResponse(new FileParallelUploadOptions(input, dataSize)
            .setParallelTransferOptions(pto)))
            .expectNextCount(1)
            .verifyComplete();
        // The full payload must have landed regardless of how it was chunked.
        StepVerifier.create(fac.getProperties())
            .assertNext(properties -> assertEquals(dataSize, properties.getFileSize()))
            .verifyComplete();
        assertEquals(numAppends, numAppendsCounter.get());
    }
private static Stream<Arguments> uploadNumberOfAppendsSupplier() {
return Stream.of(
Arguments.of((100 * Constants.MB) - 1, null, null, 1),
Arguments.of((100 * Constants.MB) + 1, null, null, (int) Math.ceil(((double) (100 * Constants.MB) + 1) / (double) (4 * Constants.MB))),
Arguments.of(100, 50L, null, 1),
Arguments.of(100, 50L, 20L, 5)
);
}
    // A successful upload must return path info carrying a non-null ETag.
    @SuppressWarnings("deprecation")
    @Test
    public void uploadReturnValue() {
        assertNotNull(fc.uploadWithResponse(
            new FileParallelUploadOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())).block()
            .getValue().getETag());
    }
    // A per-call policy pinning x-ms-version to 2019-02-02 must be honored on every request
    // made through the rebuilt client.
    @Test
    public void perCallPolicy() {
        DataLakeFileAsyncClient fileAsyncClient = getPathClientBuilder(getDataLakeCredential(), fc.getFileUrl())
            .addPolicy(getPerCallVersionPolicy())
            .buildFileAsyncClient();
        assertEquals("2019-02-02", fileAsyncClient.getPropertiesWithResponse(null).block().getHeaders()
            .getValue(X_MS_VERSION));
        assertEquals("2019-02-02", fileAsyncClient.getAccessControlWithResponse(false, null).block().getHeaders()
            .getValue(X_MS_VERSION));
    }
} |
Does this mean it isn't valid JSON? The value of a feature flag should always be valid JSON, as it's part of the content-type. | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
     * Sets the label of this configuration setting. A label is optional; by default it is not
     * set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
    // Guards every typed accessor: when the last setValue(...) was not a valid feature flag JSON
    // object, the cached fields are unreliable, so fail fast instead of returning stale data.
    private void checkValid() {
        if (!isValidValue) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
                + " property do not represent a valid feature flag object"));
        }
    }
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
    /**
     * Attempts to parse {@code value} as a feature flag JSON object, caching every top-level
     * property into {@code parsedProperties} and mirroring the known properties onto this
     * setting's fields.
     *
     * @param value the raw setting value to parse.
     * @return {@code true} when the JSON contained all required properties (id, enabled,
     * conditions); {@code false} when one is missing or the value is not valid JSON.
     */
    private boolean tryParseValue(String value) {
        parsedProperties.clear();
        final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
        try (JsonReader jsonReader = JsonProviders.createReader(value)) {
            return jsonReader.readObject(reader -> {
                // Work on copies so a failed parse leaves the current field values untouched.
                String featureIdCopy = this.featureId;
                String descriptionCopy = this.description;
                String displayNameCopy = this.displayName;
                boolean isEnabledCopy = this.isEnabled;
                List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
                while (reader.nextToken() != JsonToken.END_OBJECT) {
                    final String fieldName = reader.getFieldName();
                    reader.nextToken();
                    if (ID.equals(fieldName)) {
                        final String id = reader.getString();
                        featureIdCopy = id;
                        parsedProperties.put(ID, id);
                    } else if (DESCRIPTION.equals(fieldName)) {
                        final String description = reader.getString();
                        descriptionCopy = description;
                        parsedProperties.put(DESCRIPTION, description);
                    } else if (DISPLAY_NAME.equals(fieldName)) {
                        final String displayName = reader.getString();
                        displayNameCopy = displayName;
                        parsedProperties.put(DISPLAY_NAME, displayName);
                    } else if (ENABLED.equals(fieldName)) {
                        final boolean isEnabled = reader.getBoolean();
                        isEnabledCopy = isEnabled;
                        parsedProperties.put(ENABLED, isEnabled);
                    } else if (CONDITIONS.equals(fieldName)) {
                        final Conditions conditions = readConditions(reader);
                        if (conditions != null) {
                            List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                            featureFlagFiltersCopy = featureFlagFilters;
                            parsedProperties.put(CONDITIONS, conditions);
                        }
                    } else {
                        // Unknown property: retain it so getValue() can round-trip it.
                        parsedProperties.put(fieldName, reader.readUntyped());
                    }
                    requiredPropertiesCopy.remove(fieldName);
                }
                // Parse succeeded: commit the copies to the real fields.
                this.featureId = featureIdCopy;
                this.description = descriptionCopy;
                this.displayName = displayNameCopy;
                this.isEnabled = isEnabledCopy;
                this.clientFilters = featureFlagFiltersCopy;
                return requiredPropertiesCopy.isEmpty();
            });
        } catch (IOException e) {
            return false;
        }
    }
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
     * Sets the label of this configuration setting. A label is optional; by default it is not
     * set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
    // Guards every typed accessor: once setValue(...) has been given an invalid feature flag
    // payload, the cached fields are unreliable, so fail fast instead of returning stale data.
    private void checkValid() {
        if (!isValidFeatureFlagValue) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
                + " property do not represent a valid feature flag configuration setting."));
        }
    }
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private void tryParseValue(String value) {
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
FeatureFlagConfigurationSetting is also a ConfigurationSetting. So when user calls getValue() it should return a value. | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
 * Creates a feature flag configuration setting.
 *
 * @param featureId The feature flag identifier; the setting's key is {@code KEY_PREFIX}
 * concatenated with {@code featureId}.
 * @param isEnabled A boolean value that turns the feature flag on or off.
 */
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
    isValidValue = true; // no raw JSON value has been supplied yet, so the setting starts out valid
    this.featureId = featureId;
    this.isEnabled = isEnabled;
    super.setKey(KEY_PREFIX + featureId); // key is always derived from the feature id
    super.setContentType(FEATURE_FLAG_CONTENT_TYPE); // fixed feature-flag JSON content type
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
 * Sets the value of this setting and attempts to parse it as feature flag JSON.
 * <p>
 * The raw value is always stored verbatim. If it cannot be parsed as a feature flag, this
 * copy of the method does not throw; the setting is marked invalid instead, and subsequent
 * strongly-typed accessors (for example {@code getFeatureId()}) will throw
 * {@link IllegalArgumentException}.
 *
 * @param value The value to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
    originalValue = value; // keep the caller-supplied text for round-tripping in getValue()
    super.setValue(value);
    isValidValue = tryParseValue(value); // false when JSON is malformed or required fields are missing
    return this;
}
/**
 * Sets the label of this configuration setting. A setting with no explicit label uses the
 * default (null) label.
 *
 * @param label The label of this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
    super.setLabel(label);
    return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
/**
 * Verifies that the current setting value was parsed successfully as a feature flag.
 *
 * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
 */
private void checkValid() {
    if (!isValidValue) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
            + " property does not represent a valid feature flag object"));
    }
}
/**
 * Writes {@code propertyName} to {@code writer} if it is one of the known feature flag
 * properties (id, description, display_name, enabled, conditions).
 *
 * @param propertyName The JSON property name being written.
 * @param propertyValue The previously parsed value; only consulted for {@code conditions}.
 * @param writer The JSON writer to emit into.
 * @param includeOptionalWhenNull Whether optional properties (description, display_name) are
 * written even when their current field value is null.
 * @return {@code true} if the property was a known one and was handled; {@code false} otherwise.
 * @throws IOException If writing to {@code writer} fails.
 */
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
    boolean includeOptionalWhenNull) throws IOException {
    switch (propertyName) {
        case ID:
            // Known properties are serialized from the current fields, not from propertyValue.
            writer.writeStringField(ID, featureId);
            break;
        case DESCRIPTION:
            if (includeOptionalWhenNull || description != null) {
                writer.writeStringField(DESCRIPTION, description);
            }
            break;
        case DISPLAY_NAME:
            if (includeOptionalWhenNull || displayName != null) {
                writer.writeStringField(DISPLAY_NAME, displayName);
            }
            break;
        case ENABLED:
            writer.writeBooleanField(ENABLED, isEnabled);
            break;
        case CONDITIONS:
            // Conditions carry unknown nested properties, so the parsed value is needed here.
            tryWriteConditions(propertyValue, writer);
            break;
        default:
            return false;
    }
    return true;
}
/**
 * Writes the {@code conditions} JSON object, preserving any unknown condition properties that
 * were captured during parsing and always emitting the {@code client_filters} array.
 *
 * @param propertyValue The previously parsed {@link Conditions}, or {@code null} when none were parsed.
 * @param writer The JSON writer to emit into.
 * @throws IOException If writing to {@code writer} fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // 'instanceof' is false for null, so the previous explicit null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
/**
 * Attempts to parse {@code value} as feature flag JSON, updating this setting's strongly-typed
 * fields and caching every encountered property (known and unknown) in {@code parsedProperties}
 * so the value can later be re-serialized without losing data.
 *
 * @param value The raw setting value to parse.
 * @return {@code true} when parsing succeeded and all required properties (id, enabled,
 * conditions) were present; {@code false} otherwise.
 */
private boolean tryParseValue(String value) {
    parsedProperties.clear();
    // Tracks which required properties are still missing; names are removed as they are seen.
    final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
    try (JsonReader jsonReader = JsonProviders.createReader(value)) {
        return jsonReader.readObject(reader -> {
            // Parse into local copies so the fields are only committed after a full pass.
            String featureIdCopy = this.featureId;
            String descriptionCopy = this.description;
            String displayNameCopy = this.displayName;
            boolean isEnabledCopy = this.isEnabled;
            List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                final String fieldName = reader.getFieldName();
                reader.nextToken();
                if (ID.equals(fieldName)) {
                    final String id = reader.getString();
                    featureIdCopy = id;
                    parsedProperties.put(ID, id);
                } else if (DESCRIPTION.equals(fieldName)) {
                    final String description = reader.getString();
                    descriptionCopy = description;
                    parsedProperties.put(DESCRIPTION, description);
                } else if (DISPLAY_NAME.equals(fieldName)) {
                    final String displayName = reader.getString();
                    displayNameCopy = displayName;
                    parsedProperties.put(DISPLAY_NAME, displayName);
                } else if (ENABLED.equals(fieldName)) {
                    final boolean isEnabled = reader.getBoolean();
                    isEnabledCopy = isEnabled;
                    parsedProperties.put(ENABLED, isEnabled);
                } else if (CONDITIONS.equals(fieldName)) {
                    final Conditions conditions = readConditions(reader);
                    if (conditions != null) {
                        List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                        featureFlagFiltersCopy = featureFlagFilters;
                        parsedProperties.put(CONDITIONS, conditions);
                    }
                } else {
                    // Unknown properties are retained untyped so getValue() can round-trip them.
                    parsedProperties.put(fieldName, reader.readUntyped());
                }
                requiredPropertiesCopy.remove(fieldName);
            }
            this.featureId = featureIdCopy;
            this.description = descriptionCopy;
            this.displayName = displayNameCopy;
            this.isEnabled = isEnabledCopy;
            this.clientFilters = featureFlagFiltersCopy;
            return requiredPropertiesCopy.isEmpty();
        });
    } catch (IOException e) {
        // Malformed JSON is not an error here; the setting is simply marked invalid by the caller.
        return false;
    }
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
 * Sets the label of this configuration setting. A setting with no explicit label uses the
 * default (null) label.
 *
 * @param label The label of this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
    super.setLabel(label);
    return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
/**
 * Verifies that the current setting value was parsed successfully as a feature flag.
 *
 * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
 */
private void checkValid() {
    if (!isValidFeatureFlagValue) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
            + " property does not represent a valid feature flag configuration setting."));
    }
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
/**
 * Writes the {@code conditions} JSON object, preserving any unknown condition properties that
 * were captured during parsing and always emitting the {@code client_filters} array.
 *
 * @param propertyValue The previously parsed {@link Conditions}, or {@code null} when none were parsed.
 * @param writer The JSON writer to emit into.
 * @throws IOException If writing to {@code writer} fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // 'instanceof' is false for null, so the previous explicit null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    // NOTE: this copy intentionally reads the field directly (may be null) rather than the
    // lazily-initializing getClientFilters() accessor; behavior preserved as-is.
    writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
private void tryParseValue(String value) {
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
My question is: what constitutes an invalid value here? | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
 * Sets the label of this configuration setting. A setting with no explicit label uses the
 * default (null) label.
 *
 * @param label The label of this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
    super.setLabel(label);
    return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag object"));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private boolean tryParseValue(String value) {
parsedProperties.clear();
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
return jsonReader.readObject(reader -> {
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
return false;
}
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private void tryParseValue(String value) {
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
An invalid feature flag value. Sorry for the confusion. https://github.com/Azure/azure-sdk-for-java/pull/36725/files#diff-8f1ea70b39172e558f2acd5e3fa90011bc0a2f379ac547ab7a760799f02ce77eR91 | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag object"));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private boolean tryParseValue(String value) {
parsedProperties.clear();
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
return jsonReader.readObject(reader -> {
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
return false;
}
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private void tryParseValue(String value) {
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
That invalid value shouldn't be allowed to be set. It will fail if sent to app configuration. | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag object"));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private boolean tryParseValue(String value) {
parsedProperties.clear();
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
return jsonReader.readObject(reader -> {
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
return false;
}
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
/**
 * Parses {@code value} as a feature flag JSON document, caching every property (known and
 * unknown) into {@code parsedProperties} for later round-tripping, and mirroring the known
 * properties into this setting's fields.
 *
 * @param value The raw configuration setting value to parse.
 * @throws IllegalArgumentException (logged) if {@code value} is not parseable JSON; the
 * setting is marked invalid before the exception is thrown.
 */
private void tryParseValue(String value) {
    parsedProperties.clear();
    try (JsonReader jsonReader = JsonProviders.createReader(value)) {
        jsonReader.readObject(reader -> {
            // Tracks which required properties (id, enabled, conditions) have been seen.
            final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
            // Stage values in locals so the fields are only overwritten after the
            // whole object has been read.
            String featureIdCopy = this.featureId;
            String descriptionCopy = this.description;
            String displayNameCopy = this.displayName;
            boolean isEnabledCopy = this.isEnabled;
            List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                final String fieldName = reader.getFieldName();
                reader.nextToken();
                if (ID.equals(fieldName)) {
                    final String id = reader.getString();
                    featureIdCopy = id;
                    parsedProperties.put(ID, id);
                } else if (DESCRIPTION.equals(fieldName)) {
                    final String description = reader.getString();
                    descriptionCopy = description;
                    parsedProperties.put(DESCRIPTION, description);
                } else if (DISPLAY_NAME.equals(fieldName)) {
                    final String displayName = reader.getString();
                    displayNameCopy = displayName;
                    parsedProperties.put(DISPLAY_NAME, displayName);
                } else if (ENABLED.equals(fieldName)) {
                    final boolean isEnabled = reader.getBoolean();
                    isEnabledCopy = isEnabled;
                    parsedProperties.put(ENABLED, isEnabled);
                } else if (CONDITIONS.equals(fieldName)) {
                    final Conditions conditions = readConditions(reader);
                    if (conditions != null) {
                        List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                        featureFlagFiltersCopy = featureFlagFilters;
                        parsedProperties.put(CONDITIONS, conditions);
                    }
                } else {
                    // Unknown property: preserved verbatim so getValue() can re-emit it.
                    parsedProperties.put(fieldName, reader.readUntyped());
                }
                requiredPropertiesCopy.remove(fieldName);
            }
            // Commit the staged values to the setting's fields.
            this.featureId = featureIdCopy;
            this.description = descriptionCopy;
            this.displayName = displayNameCopy;
            this.isEnabled = isEnabledCopy;
            this.clientFilters = featureFlagFiltersCopy;
            // NOTE(review): this result (all required properties present) is discarded by
            // this void overload — presumably validity is tracked by the caller; confirm.
            return requiredPropertiesCopy.isEmpty();
        });
    } catch (IOException e) {
        isValidFeatureFlagValue = false;
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
    }
}
} |
We are trying to avoid client side validations but the feature flag is still a ConfigurationSetting. | public String getValue() {
// Invalid JSON was stored: hand back the caller's raw value untouched.
if (!isValidValue) {
    return originalValue;
}
// Known properties not present in parsedProperties still need to be emitted below.
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final JsonWriter writer = JsonProviders.createWriter(outputStream);
    writer.writeStartObject();
    // First pass: re-serialize every property captured at parse time, preserving order.
    for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
        final String name = entry.getKey();
        final Object jsonValue = entry.getValue();
        try {
            if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
                knownProperties.remove(name);
            } else {
                // Unknown property: written back verbatim.
                writer.writeUntypedField(name, jsonValue);
            }
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    // Second pass: emit known properties never seen during parsing
    // (optional null-valued fields are skipped by the 'false' flag).
    for (final String propertyName : knownProperties) {
        tryWriteKnownProperty(propertyName, null, writer, false);
    }
    writer.writeEndObject();
    writer.flush();
    originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
    outputStream.close();
} catch (IOException exception) {
    // NOTE(review): the error is logged but not rethrown, so a stale (or null)
    // 'originalValue' may be returned below — confirm this is intentional.
    LOGGER.logExceptionAsError(new IllegalArgumentException(
        "Can't parse Feature Flag configuration setting value.", exception));
}
// Keep the base class's stored value in sync with the regenerated JSON.
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
// Regenerated JSON; stays null if serialization fails below.
String newValue = null;
try {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final JsonWriter writer = JsonProviders.createWriter(outputStream);
    // Known properties not present in parsedProperties still need to be emitted below.
    final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
    writer.writeStartObject();
    // First pass: re-serialize every property captured at parse time, preserving order.
    for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
        final String name = entry.getKey();
        final Object jsonValue = entry.getValue();
        try {
            if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
                knownProperties.remove(name);
            } else {
                // Unknown property: written back verbatim.
                writer.writeUntypedField(name, jsonValue);
            }
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    // Second pass: emit known properties never seen during parsing
    // (optional null-valued fields are skipped by the 'false' flag).
    for (final String propertyName : knownProperties) {
        tryWriteKnownProperty(propertyName, null, writer, false);
    }
    writer.writeEndObject();
    writer.flush();
    newValue = outputStream.toString(StandardCharsets.UTF_8.name());
    outputStream.close();
} catch (IOException exception) {
    // NOTE(review): the error is logged but not rethrown, so null may be
    // returned below — confirm this is intentional.
    LOGGER.logExceptionAsError(new IllegalArgumentException(
        "Can't parse Feature Flag configuration setting value.", exception));
}
// Keep the base class's stored value in sync with the regenerated JSON.
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
    this.featureId = featureId;
    this.isEnabled = isEnabled;
    // A freshly constructed setting has no JSON value yet, so it is trivially valid.
    isValidValue = true;
    // The setting key is always the feature-flag prefix followed by the feature id.
    super.setKey(KEY_PREFIX + featureId);
    super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
 * Sets the value of this setting.
 *
 * @param value The value to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
    // Remember the caller's exact text so getValue() can return it verbatim
    // when it does not parse as a feature flag document.
    originalValue = value;
    super.setValue(value);
    // Parsing also caches every JSON property for later round-tripping; it
    // returns false (rather than throwing) on malformed JSON.
    isValidValue = tryParseValue(value);
    return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag object"));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
/**
 * Writes the {@code conditions} JSON object: first any unknown condition properties captured
 * during parsing (so they round-trip losslessly), then the {@code client_filters} array.
 *
 * @param propertyValue The value cached for {@code conditions}; only used when it is a
 * {@link Conditions} instance, otherwise ignored.
 * @param writer The JSON writer, positioned inside the feature flag object.
 * @throws IOException If writing to the underlying stream fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // 'instanceof' already yields false for null, so the former extra null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    // getClientFilters() also lazily initializes the list when it is still null.
    writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
/**
 * Attempts to parse {@code value} as a feature flag JSON document, caching every property
 * (known and unknown) into {@code parsedProperties} for later round-tripping, and mirroring
 * the known properties into this setting's fields.
 *
 * @param value The raw configuration setting value to parse.
 * @return {@code true} when the value parsed and contained all required properties
 * (id, enabled, conditions); {@code false} otherwise.
 */
private boolean tryParseValue(String value) {
    parsedProperties.clear();
    // Tracks which required properties have been seen; emptied as fields are consumed.
    final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
    try (JsonReader jsonReader = JsonProviders.createReader(value)) {
        return jsonReader.readObject(reader -> {
            // Stage values in locals so the fields are only overwritten after the
            // whole object has been read.
            String featureIdCopy = this.featureId;
            String descriptionCopy = this.description;
            String displayNameCopy = this.displayName;
            boolean isEnabledCopy = this.isEnabled;
            List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                final String fieldName = reader.getFieldName();
                reader.nextToken();
                if (ID.equals(fieldName)) {
                    final String id = reader.getString();
                    featureIdCopy = id;
                    parsedProperties.put(ID, id);
                } else if (DESCRIPTION.equals(fieldName)) {
                    final String description = reader.getString();
                    descriptionCopy = description;
                    parsedProperties.put(DESCRIPTION, description);
                } else if (DISPLAY_NAME.equals(fieldName)) {
                    final String displayName = reader.getString();
                    displayNameCopy = displayName;
                    parsedProperties.put(DISPLAY_NAME, displayName);
                } else if (ENABLED.equals(fieldName)) {
                    final boolean isEnabled = reader.getBoolean();
                    isEnabledCopy = isEnabled;
                    parsedProperties.put(ENABLED, isEnabled);
                } else if (CONDITIONS.equals(fieldName)) {
                    final Conditions conditions = readConditions(reader);
                    if (conditions != null) {
                        List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                        featureFlagFiltersCopy = featureFlagFilters;
                        parsedProperties.put(CONDITIONS, conditions);
                    }
                } else {
                    // Unknown property: preserved verbatim so getValue() can re-emit it.
                    parsedProperties.put(fieldName, reader.readUntyped());
                }
                requiredPropertiesCopy.remove(fieldName);
            }
            // Commit the staged values to the setting's fields.
            this.featureId = featureIdCopy;
            this.description = descriptionCopy;
            this.displayName = displayNameCopy;
            this.isEnabled = isEnabledCopy;
            this.clientFilters = featureFlagFiltersCopy;
            return requiredPropertiesCopy.isEmpty();
        });
    } catch (IOException e) {
        // Malformed JSON: report failure instead of throwing.
        return false;
    }
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
 * Sets the value of this setting.
 *
 * @param value The value to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
 */
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
    // Throws IllegalArgumentException (via the logger) when 'value' is not valid
    // JSON, so the validity flag below is only reached for parseable values.
    tryParseValue(value);
    isValidFeatureFlagValue = true;
    super.setValue(value);
    return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
/**
 * Writes the {@code conditions} JSON object: first any unknown condition properties captured
 * during parsing (so they round-trip losslessly), then the {@code client_filters} array.
 *
 * @param propertyValue The value cached for {@code conditions}; only used when it is a
 * {@link Conditions} instance, otherwise ignored.
 * @param writer The JSON writer, positioned inside the feature flag object.
 * @throws IOException If writing to the underlying stream fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // 'instanceof' already yields false for null, so the former extra null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
/**
 * Parses {@code value} as a feature flag JSON document, caching every property (known and
 * unknown) into {@code parsedProperties} for later round-tripping, and mirroring the known
 * properties into this setting's fields.
 *
 * @param value The raw configuration setting value to parse.
 * @throws IllegalArgumentException (logged) if {@code value} is not parseable JSON; the
 * setting is marked invalid before the exception is thrown.
 */
private void tryParseValue(String value) {
    parsedProperties.clear();
    try (JsonReader jsonReader = JsonProviders.createReader(value)) {
        jsonReader.readObject(reader -> {
            // Tracks which required properties (id, enabled, conditions) have been seen.
            final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
            // Stage values in locals so the fields are only overwritten after the
            // whole object has been read.
            String featureIdCopy = this.featureId;
            String descriptionCopy = this.description;
            String displayNameCopy = this.displayName;
            boolean isEnabledCopy = this.isEnabled;
            List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                final String fieldName = reader.getFieldName();
                reader.nextToken();
                if (ID.equals(fieldName)) {
                    final String id = reader.getString();
                    featureIdCopy = id;
                    parsedProperties.put(ID, id);
                } else if (DESCRIPTION.equals(fieldName)) {
                    final String description = reader.getString();
                    descriptionCopy = description;
                    parsedProperties.put(DESCRIPTION, description);
                } else if (DISPLAY_NAME.equals(fieldName)) {
                    final String displayName = reader.getString();
                    displayNameCopy = displayName;
                    parsedProperties.put(DISPLAY_NAME, displayName);
                } else if (ENABLED.equals(fieldName)) {
                    final boolean isEnabled = reader.getBoolean();
                    isEnabledCopy = isEnabled;
                    parsedProperties.put(ENABLED, isEnabled);
                } else if (CONDITIONS.equals(fieldName)) {
                    final Conditions conditions = readConditions(reader);
                    if (conditions != null) {
                        List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                        featureFlagFiltersCopy = featureFlagFilters;
                        parsedProperties.put(CONDITIONS, conditions);
                    }
                } else {
                    // Unknown property: preserved verbatim so getValue() can re-emit it.
                    parsedProperties.put(fieldName, reader.readUntyped());
                }
                requiredPropertiesCopy.remove(fieldName);
            }
            // Commit the staged values to the setting's fields.
            this.featureId = featureIdCopy;
            this.description = descriptionCopy;
            this.displayName = displayNameCopy;
            this.isEnabled = isEnabledCopy;
            this.clientFilters = featureFlagFiltersCopy;
            // NOTE(review): this result (all required properties present) is discarded by
            // this void overload — presumably validity is tracked by the caller; confirm.
            return requiredPropertiesCopy.isEmpty();
        });
    } catch (IOException e) {
        isValidFeatureFlagValue = false;
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
    }
}
} |
If you remove the code and try to parse it as is, then it will result in an error which would be correct. Or you could make value not settable, and make it so we have a "other" map which updates the value field. I think another language does that. | public String getValue() {
// Invalid JSON was stored: hand back the caller's raw value untouched.
if (!isValidValue) {
    return originalValue;
}
// Known properties not present in parsedProperties still need to be emitted below.
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final JsonWriter writer = JsonProviders.createWriter(outputStream);
    writer.writeStartObject();
    // First pass: re-serialize every property captured at parse time, preserving order.
    for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
        final String name = entry.getKey();
        final Object jsonValue = entry.getValue();
        try {
            if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
                knownProperties.remove(name);
            } else {
                // Unknown property: written back verbatim.
                writer.writeUntypedField(name, jsonValue);
            }
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    // Second pass: emit known properties never seen during parsing
    // (optional null-valued fields are skipped by the 'false' flag).
    for (final String propertyName : knownProperties) {
        tryWriteKnownProperty(propertyName, null, writer, false);
    }
    writer.writeEndObject();
    writer.flush();
    originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
    outputStream.close();
} catch (IOException exception) {
    // NOTE(review): the error is logged but not rethrown, so a stale (or null)
    // 'originalValue' may be returned below — confirm this is intentional.
    LOGGER.logExceptionAsError(new IllegalArgumentException(
        "Can't parse Feature Flag configuration setting value.", exception));
}
// Keep the base class's stored value in sync with the regenerated JSON.
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
// Regenerated JSON; stays null if serialization fails below.
String newValue = null;
try {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    final JsonWriter writer = JsonProviders.createWriter(outputStream);
    // Known properties not present in parsedProperties still need to be emitted below.
    final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
    writer.writeStartObject();
    // First pass: re-serialize every property captured at parse time, preserving order.
    for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
        final String name = entry.getKey();
        final Object jsonValue = entry.getValue();
        try {
            if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
                knownProperties.remove(name);
            } else {
                // Unknown property: written back verbatim.
                writer.writeUntypedField(name, jsonValue);
            }
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    // Second pass: emit known properties never seen during parsing
    // (optional null-valued fields are skipped by the 'false' flag).
    for (final String propertyName : knownProperties) {
        tryWriteKnownProperty(propertyName, null, writer, false);
    }
    writer.writeEndObject();
    writer.flush();
    newValue = outputStream.toString(StandardCharsets.UTF_8.name());
    outputStream.close();
} catch (IOException exception) {
    // NOTE(review): the error is logged but not rethrown, so null may be
    // returned below — confirm this is intentional.
    LOGGER.logExceptionAsError(new IllegalArgumentException(
        "Can't parse Feature Flag configuration setting value.", exception));
}
// Keep the base class's stored value in sync with the regenerated JSON.
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    super.setKey(key);
    return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
 * Appends a single feature flag filter to this configuration setting, creating the backing
 * list on first use.
 *
 * @param clientFilter a feature flag filter to add to this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
    checkValid();
    List<FeatureFlagFilter> filters = this.clientFilters;
    if (filters == null) {
        filters = new ArrayList<>();
        this.clientFilters = filters;
    }
    filters.add(clientFilter);
    return this;
}
/**
 * Ensures the setting's value was valid feature flag JSON; invoked before every typed accessor.
 *
 * @throws IllegalArgumentException if the last value set could not be parsed as a feature flag.
 */
private void checkValid() {
    if (!isValidValue) {
        // Grammar fix in the message: singular subject takes "does not".
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
            + " property does not represent a valid feature flag object"));
    }
}
/**
 * Writes one of the well-known feature flag JSON properties to {@code writer}.
 *
 * @param propertyName The JSON property name being written.
 * @param propertyValue The previously parsed value; only consulted when writing the conditions object.
 * @param writer The JSON writer to emit into.
 * @param includeOptionalWhenNull When true, the optional properties (description, display name) are
 * written even when null; when false they are skipped if null.
 * @return {@code true} if {@code propertyName} was a known property and was handled,
 * {@code false} if the caller must write it itself.
 * @throws IOException If writing to the underlying stream fails.
 */
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
// Optional property: only written when present, unless the caller forces inclusion.
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
// Optional property, same rule as description.
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
// Not a known property; the caller is responsible for writing it.
return false;
}
return true;
}
/**
 * Writes the {@code conditions} JSON object: first any unknown condition properties captured
 * during parsing, then the {@code client_filters} array.
 *
 * @param propertyValue The previously parsed {@link Conditions} value, or {@code null} if none was parsed.
 * @param writer The JSON writer to emit into.
 * @throws IOException If writing to the underlying stream fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // instanceof is already false for null, so the explicit null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        // Round-trip any condition properties that are not modeled explicitly.
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
/**
 * Attempts to parse {@code value} as a feature flag JSON object, caching every property (known and
 * unknown) in {@code parsedProperties} and copying the known ones into this setting's fields.
 *
 * @param value The raw setting value to parse.
 * @return {@code true} if parsing succeeded and all required properties (id, enabled, conditions)
 * were present; {@code false} on malformed JSON or missing required properties.
 */
private boolean tryParseValue(String value) {
parsedProperties.clear();
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
return jsonReader.readObject(reader -> {
// Work on local copies so the fields are only overwritten after the object has been
// read in full.
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
// Unknown properties are remembered verbatim so getValue() can round-trip them.
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
// Commit the parsed values only once the whole object was read without error.
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
// Malformed JSON: report invalid rather than throwing from a setter.
return false;
}
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    // Delegate to the base class and return 'this' so fluent chains stay typed as
    // FeatureFlagConfigurationSetting. (Fixed: the original carried TWO @Override annotations —
    // one stranded before the Javadoc — which is a compile error, since @Override is not repeatable.)
    super.setKey(key);
    return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
// Parse first: on malformed JSON this variant of tryParseValue marks the setting invalid and
// throws IllegalArgumentException, so the bad value is never stored in the base class.
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
private void tryParseValue(String value) {
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
Hmm. I am trying to align with what .NET shipped: https://github.com/Azure/azure-sdk-for-net/blob/f41d1a08ff2c2b8d748c61774caac5491bf18b88/sdk/appconfiguration/Azure.Data.AppConfiguration/src/Models/FeatureFlagConfigurationSetting.cs#L192 | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
// Known feature-flag properties in canonical order; entries are removed as they are written
// from parsedProperties so that only the never-parsed ones are appended afterwards.
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
// First replay every property seen when the value was parsed, preserving its original order.
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
// Unrecognized properties are round-tripped untouched.
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
// Then write any known property that was never parsed (e.g. set via the typed setters),
// skipping optional ones that are still null.
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
// NOTE(review): the exception is logged but not rethrown here, so newValue stays null —
// presumably intentional best-effort behavior; confirm.
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
// Keep the base class's stored value in sync with the serialized form just produced.
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
/**
 * Sets the key of this setting.
 *
 * @param key The key to associate with this configuration setting.
 *
 * @return The updated {@link FeatureFlagConfigurationSetting} object.
 */
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
    // Delegate to the base class and return 'this' so fluent chains stay typed as
    // FeatureFlagConfigurationSetting. (Fixed: the original carried TWO @Override annotations —
    // one stranded before the Javadoc — which is a compile error, since @Override is not repeatable.)
    super.setKey(key);
    return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
originalValue = value;
super.setValue(value);
isValidValue = tryParseValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
/**
 * Ensures the setting's value was valid feature flag JSON; invoked before every typed accessor.
 *
 * @throws IllegalArgumentException if the last value set could not be parsed as a feature flag.
 */
private void checkValid() {
    if (!isValidValue) {
        // Grammar fix in the message: singular subject takes "does not".
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
            + " property does not represent a valid feature flag object"));
    }
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
/**
 * Writes the {@code conditions} JSON object: first any unknown condition properties captured
 * during parsing, then the {@code client_filters} array.
 *
 * @param propertyValue The previously parsed {@link Conditions} value, or {@code null} if none was parsed.
 * @param writer The JSON writer to emit into.
 * @throws IOException If writing to the underlying stream fails.
 */
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    writer.writeStartObject(CONDITIONS);
    // instanceof is already false for null, so the explicit null check was redundant.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        // Round-trip any condition properties that are not modeled explicitly.
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }
    writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });
    writer.writeEndObject();
}
    /**
     * Attempts to parse {@code value} as feature-flag JSON, populating the typed fields
     * and recording every field (known or unknown) in {@code parsedProperties} in
     * encounter order so {@code getValue()} can round-trip the original document.
     *
     * @param value the raw configuration-setting value to parse.
     * @return {@code true} if the JSON parsed cleanly and contained every required
     * property (id, enabled, conditions); {@code false} otherwise.
     */
    private boolean tryParseValue(String value) {
        parsedProperties.clear();
        // Each required property seen during the scan is removed; leftovers mean the value is invalid.
        final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
        try (JsonReader jsonReader = JsonProviders.createReader(value)) {
            return jsonReader.readObject(reader -> {
                // Stage into locals so the fields are only mutated after a full, successful scan.
                String featureIdCopy = this.featureId;
                String descriptionCopy = this.description;
                String displayNameCopy = this.displayName;
                boolean isEnabledCopy = this.isEnabled;
                List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
                while (reader.nextToken() != JsonToken.END_OBJECT) {
                    final String fieldName = reader.getFieldName();
                    reader.nextToken();
                    if (ID.equals(fieldName)) {
                        final String id = reader.getString();
                        featureIdCopy = id;
                        parsedProperties.put(ID, id);
                    } else if (DESCRIPTION.equals(fieldName)) {
                        // NOTE: local intentionally shadows the field; the field is assigned after the loop.
                        final String description = reader.getString();
                        descriptionCopy = description;
                        parsedProperties.put(DESCRIPTION, description);
                    } else if (DISPLAY_NAME.equals(fieldName)) {
                        final String displayName = reader.getString();
                        displayNameCopy = displayName;
                        parsedProperties.put(DISPLAY_NAME, displayName);
                    } else if (ENABLED.equals(fieldName)) {
                        final boolean isEnabled = reader.getBoolean();
                        isEnabledCopy = isEnabled;
                        parsedProperties.put(ENABLED, isEnabled);
                    } else if (CONDITIONS.equals(fieldName)) {
                        final Conditions conditions = readConditions(reader);
                        if (conditions != null) {
                            List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                            featureFlagFiltersCopy = featureFlagFilters;
                            parsedProperties.put(CONDITIONS, conditions);
                        }
                    } else {
                        // Unknown fields are preserved untyped for round-tripping.
                        parsedProperties.put(fieldName, reader.readUntyped());
                    }
                    requiredPropertiesCopy.remove(fieldName);
                }
                this.featureId = featureIdCopy;
                this.description = descriptionCopy;
                this.displayName = displayNameCopy;
                this.isEnabled = isEnabledCopy;
                this.clientFilters = featureFlagFiltersCopy;
                return requiredPropertiesCopy.isEmpty();
            });
        } catch (IOException e) {
            // Malformed JSON: report invalid rather than propagate.
            return false;
        }
    }
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
    public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
        // A freshly constructed setting has no raw value yet, so it is valid by definition.
        isValidFeatureFlagValue = true;
        this.featureId = featureId;
        this.isEnabled = isEnabled;
        // The setting key is always the feature-flag prefix plus the feature id.
        super.setKey(KEY_PREFIX + featureId);
        // Feature flags use a fixed, service-defined content type.
        super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
    }
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
    @Override
    public FeatureFlagConfigurationSetting setValue(String value) {
        // Parses eagerly; tryParseValue raises on malformed JSON, so reaching the next
        // line implies the value was accepted and the flag can be marked valid.
        tryParseValue(value);
        isValidFeatureFlagValue = true;
        super.setValue(value);
        return this;
    }
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
    /**
     * Guard used by every typed accessor/mutator: throws when the last {@code setValue}
     * call supplied JSON that does not represent a valid feature flag, so callers never
     * observe typed properties derived from an invalid value.
     *
     * @throws IllegalArgumentException if the current value is not a valid feature flag.
     */
    private void checkValid() {
        if (!isValidFeatureFlagValue) {
            // Logged and thrown through the shared logger; the raw value is included in the message.
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
                + " property do not represent a valid feature flag configuration setting."))
;
        }
    }
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
    /**
     * Parses {@code value} as feature-flag JSON, populating the typed fields and recording
     * every field (known or unknown) in {@code parsedProperties} in encounter order for
     * round-tripping.
     *
     * @param value the raw configuration-setting value to parse.
     * @throws IllegalArgumentException (wrapping the {@link IOException}) if the JSON is malformed;
     * {@code isValidFeatureFlagValue} is cleared first.
     */
    private void tryParseValue(String value) {
        parsedProperties.clear();
        try (JsonReader jsonReader = JsonProviders.createReader(value)) {
            // NOTE(review): the boolean returned by readObject (the required-properties check
            // below) is discarded here, so a value missing required fields is not flagged
            // invalid — confirm this is intentional.
            jsonReader.readObject(reader -> {
                // Each required property seen during the scan is removed; leftovers would mean invalid.
                final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
                // Stage into locals so the fields are only mutated after a full, successful scan.
                String featureIdCopy = this.featureId;
                String descriptionCopy = this.description;
                String displayNameCopy = this.displayName;
                boolean isEnabledCopy = this.isEnabled;
                List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
                while (reader.nextToken() != JsonToken.END_OBJECT) {
                    final String fieldName = reader.getFieldName();
                    reader.nextToken();
                    if (ID.equals(fieldName)) {
                        final String id = reader.getString();
                        featureIdCopy = id;
                        parsedProperties.put(ID, id);
                    } else if (DESCRIPTION.equals(fieldName)) {
                        // NOTE: local intentionally shadows the field; the field is assigned after the loop.
                        final String description = reader.getString();
                        descriptionCopy = description;
                        parsedProperties.put(DESCRIPTION, description);
                    } else if (DISPLAY_NAME.equals(fieldName)) {
                        final String displayName = reader.getString();
                        displayNameCopy = displayName;
                        parsedProperties.put(DISPLAY_NAME, displayName);
                    } else if (ENABLED.equals(fieldName)) {
                        final boolean isEnabled = reader.getBoolean();
                        isEnabledCopy = isEnabled;
                        parsedProperties.put(ENABLED, isEnabled);
                    } else if (CONDITIONS.equals(fieldName)) {
                        final Conditions conditions = readConditions(reader);
                        if (conditions != null) {
                            List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                            featureFlagFiltersCopy = featureFlagFilters;
                            parsedProperties.put(CONDITIONS, conditions);
                        }
                    } else {
                        // Unknown fields are preserved untyped for round-tripping.
                        parsedProperties.put(fieldName, reader.readUntyped());
                    }
                    requiredPropertiesCopy.remove(fieldName);
                }
                this.featureId = featureIdCopy;
                this.description = descriptionCopy;
                this.displayName = displayNameCopy;
                this.isEnabled = isEnabledCopy;
                this.clientFilters = featureFlagFiltersCopy;
                return requiredPropertiesCopy.isEmpty();
            });
        } catch (IOException e) {
            // Malformed JSON: mark invalid and surface as an IllegalArgumentException, keeping the cause.
            isValidFeatureFlagValue = false;
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
        }
    }
} |
After an offline discussion with Matt. Java will throw an exception if the value is invalid instead of returning the original value. It aligns with the preview version behavior in Java. | public String getValue() {
if (!isValidValue) {
return originalValue;
}
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
originalValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(originalValue);
return originalValue;
} | if (!isValidValue) { | public String getValue() {
String newValue = null;
try {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final JsonWriter writer = JsonProviders.createWriter(outputStream);
final Set<String> knownProperties = new LinkedHashSet<>(requiredOrOptionalJsonProperties);
writer.writeStartObject();
for (Map.Entry<String, Object> entry : parsedProperties.entrySet()) {
final String name = entry.getKey();
final Object jsonValue = entry.getValue();
try {
if (tryWriteKnownProperty(name, jsonValue, writer, true)) {
knownProperties.remove(name);
} else {
writer.writeUntypedField(name, jsonValue);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
for (final String propertyName : knownProperties) {
tryWriteKnownProperty(propertyName, null, writer, false);
}
writer.writeEndObject();
writer.flush();
newValue = outputStream.toString(StandardCharsets.UTF_8.name());
outputStream.close();
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
super.setValue(newValue);
return newValue;
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private String originalValue;
private boolean isValidValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
    @Override
    public FeatureFlagConfigurationSetting setValue(String value) {
        // Keep the caller's exact text so getValue() can return it unchanged when parsing fails.
        originalValue = value;
        super.setValue(value);
        // Parse eagerly; the result drives checkValid() in every typed accessor.
        isValidValue = tryParseValue(value);
        return this;
    }
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
checkValid();
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
checkValid();
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
return this;
}
private void checkValid() {
if (!isValidValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag object"));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
switch (propertyName) {
case ID:
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
tryWriteConditions(propertyValue, writer);
break;
default:
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
writer.writeStartObject(CONDITIONS);
if (propertyValue != null && propertyValue instanceof Conditions) {
Conditions propertyValueClone = (Conditions) propertyValue;
for (Map.Entry<String, Object> entry : propertyValueClone.getUnknownConditions().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
writer.writeUntypedField(key, value);
}
}
writer.writeArrayField(CLIENT_FILTERS, this.getClientFilters(), (jsonWriter, filter) -> {
jsonWriter.writeStartObject();
jsonWriter.writeStringField(NAME, filter.getName());
jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
jsonWriter.writeEndObject();
});
writer.writeEndObject();
}
    /**
     * Attempts to parse {@code value} as feature-flag JSON, populating the typed fields
     * and recording every field (known or unknown) in {@code parsedProperties} in
     * encounter order so {@code getValue()} can round-trip the original document.
     *
     * @param value the raw configuration-setting value to parse.
     * @return {@code true} if the JSON parsed cleanly and contained every required
     * property (id, enabled, conditions); {@code false} otherwise.
     */
    private boolean tryParseValue(String value) {
        parsedProperties.clear();
        // Each required property seen during the scan is removed; leftovers mean the value is invalid.
        final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
        try (JsonReader jsonReader = JsonProviders.createReader(value)) {
            return jsonReader.readObject(reader -> {
                // Stage into locals so the fields are only mutated after a full, successful scan.
                String featureIdCopy = this.featureId;
                String descriptionCopy = this.description;
                String displayNameCopy = this.displayName;
                boolean isEnabledCopy = this.isEnabled;
                List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
                while (reader.nextToken() != JsonToken.END_OBJECT) {
                    final String fieldName = reader.getFieldName();
                    reader.nextToken();
                    if (ID.equals(fieldName)) {
                        final String id = reader.getString();
                        featureIdCopy = id;
                        parsedProperties.put(ID, id);
                    } else if (DESCRIPTION.equals(fieldName)) {
                        // NOTE: local intentionally shadows the field; the field is assigned after the loop.
                        final String description = reader.getString();
                        descriptionCopy = description;
                        parsedProperties.put(DESCRIPTION, description);
                    } else if (DISPLAY_NAME.equals(fieldName)) {
                        final String displayName = reader.getString();
                        displayNameCopy = displayName;
                        parsedProperties.put(DISPLAY_NAME, displayName);
                    } else if (ENABLED.equals(fieldName)) {
                        final boolean isEnabled = reader.getBoolean();
                        isEnabledCopy = isEnabled;
                        parsedProperties.put(ENABLED, isEnabled);
                    } else if (CONDITIONS.equals(fieldName)) {
                        final Conditions conditions = readConditions(reader);
                        if (conditions != null) {
                            List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
                            featureFlagFiltersCopy = featureFlagFilters;
                            parsedProperties.put(CONDITIONS, conditions);
                        }
                    } else {
                        // Unknown fields are preserved untyped for round-tripping.
                        parsedProperties.put(fieldName, reader.readUntyped());
                    }
                    requiredPropertiesCopy.remove(fieldName);
                }
                this.featureId = featureIdCopy;
                this.description = descriptionCopy;
                this.displayName = displayNameCopy;
                this.isEnabled = isEnabledCopy;
                this.clientFilters = featureFlagFiltersCopy;
                return requiredPropertiesCopy.isEmpty();
            });
        } catch (IOException e) {
            // Malformed JSON: report invalid rather than propagate.
            return false;
        }
    }
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
private boolean isValidFeatureFlagValue;
private final Map<String, Object> parsedProperties = new LinkedHashMap<>(5);
private final List<String> requiredJsonProperties = Arrays.asList(ID, ENABLED, CONDITIONS);
private final List<String> requiredOrOptionalJsonProperties =
Arrays.asList(ID, DESCRIPTION, DISPLAY_NAME, ENABLED, CONDITIONS);
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
isValidFeatureFlagValue = true;
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
}
@Override
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
tryParseValue(value);
isValidFeatureFlagValue = true;
super.setValue(value);
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getFeatureId() {
checkValid();
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
checkValid();
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public boolean isEnabled() {
checkValid();
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
checkValid();
this.isEnabled = isEnabled;
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDescription() {
// Fails fast if the underlying setting value was not valid feature-flag JSON.
checkValid();
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
// Reject updates when the backing JSON value could not be parsed as a feature flag.
checkValid();
this.description = description;
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public String getDisplayName() {
// Fails fast if the underlying setting value was not valid feature-flag JSON.
checkValid();
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
// Reject updates when the backing JSON value could not be parsed as a feature flag.
checkValid();
this.displayName = displayName;
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public List<FeatureFlagFilter> getClientFilters() {
    // Reject access when the backing JSON value could not be parsed as a feature flag.
    checkValid();
    // Lazily materialize the filter list so callers always get a non-null, mutable list.
    List<FeatureFlagFilter> filters = this.clientFilters;
    if (filters == null) {
        filters = new ArrayList<>();
        this.clientFilters = filters;
    }
    return filters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
// Reject updates when the backing JSON value could not be parsed as a feature flag.
checkValid();
// The list reference is stored as-is (no defensive copy), matching the lazy-init
// accessors which also hand out the live list.
this.clientFilters = clientFilters;
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
    // Reject updates when the backing JSON value could not be parsed as a feature flag.
    checkValid();
    // Lazily create the backing list on first use, then append the new filter.
    List<FeatureFlagFilter> filters = this.clientFilters;
    if (filters == null) {
        filters = new ArrayList<>();
        this.clientFilters = filters;
    }
    filters.add(clientFilter);
    return this;
}
private void checkValid() {
// Guard used by every feature-flag accessor/mutator: once parsing the setting's value
// has failed, reads and writes of the typed fields are rejected rather than exposing
// stale or partial state.
// NOTE(review): the message reads "do not represent" where "does not represent" would be
// grammatical; left unchanged since callers/tests may match on the exact text.
if (!isValidFeatureFlagValue) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("The content of the " + super.getValue()
+ " property do not represent a valid feature flag configuration setting."));
}
}
private boolean tryWriteKnownProperty(String propertyName, Object propertyValue, JsonWriter writer,
boolean includeOptionalWhenNull) throws IOException {
// Writes a property this class models explicitly (id, description, display name, enabled,
// conditions). Returns true when the property was handled here, false when the caller
// should serialize it as an unknown/untyped property instead.
switch (propertyName) {
case ID:
// Always written: the feature id is part of the feature flag payload.
writer.writeStringField(ID, featureId);
break;
case DESCRIPTION:
// Optional: emitted only when non-null, unless the caller asked to include nulls.
if (includeOptionalWhenNull || description != null) {
writer.writeStringField(DESCRIPTION, description);
}
break;
case DISPLAY_NAME:
// Optional: same null-handling rule as description.
if (includeOptionalWhenNull || displayName != null) {
writer.writeStringField(DISPLAY_NAME, displayName);
}
break;
case ENABLED:
writer.writeBooleanField(ENABLED, isEnabled);
break;
case CONDITIONS:
// Conditions are written as a nested object (client filters plus any unknown
// condition properties carried in the passed value).
tryWriteConditions(propertyValue, writer);
break;
default:
// Not a property this class knows how to write.
return false;
}
return true;
}
private void tryWriteConditions(Object propertyValue, JsonWriter writer) throws IOException {
    // Serializes the "conditions" object: unknown condition properties captured during
    // parsing are replayed first, followed by the "client_filters" array.
    writer.writeStartObject(CONDITIONS);

    // 'instanceof' is null-safe (false for null), so the previous explicit null check
    // was redundant and has been removed.
    if (propertyValue instanceof Conditions) {
        Conditions conditions = (Conditions) propertyValue;
        // Round-trip condition properties this model does not understand so they survive
        // a read/modify/write cycle.
        for (Map.Entry<String, Object> entry : conditions.getUnknownConditions().entrySet()) {
            writer.writeUntypedField(entry.getKey(), entry.getValue());
        }
    }

    // Each filter is written as { "name": ..., "parameters": { ... } }.
    writer.writeArrayField(CLIENT_FILTERS, this.clientFilters, (jsonWriter, filter) -> {
        jsonWriter.writeStartObject();
        jsonWriter.writeStringField(NAME, filter.getName());
        jsonWriter.writeMapField(PARAMETERS, filter.getParameters(), JsonWriter::writeUntyped);
        jsonWriter.writeEndObject();
    });

    writer.writeEndObject();
}
private void tryParseValue(String value) {
// Parses the setting's raw JSON value into the feature-flag-specific fields.
// Typed fields are staged in local copies and committed only after the whole object has
// been read, so a parse failure cannot leave this instance partially updated. Properties
// that are not modeled here are retained in 'parsedProperties' (in encounter order) so
// that serialization can round-trip them.
parsedProperties.clear();
try (JsonReader jsonReader = JsonProviders.createReader(value)) {
jsonReader.readObject(reader -> {
// Tracks which required JSON properties have not yet been seen.
final Set<String> requiredPropertiesCopy = new LinkedHashSet<>(requiredJsonProperties);
// Staging copies of the typed fields; committed after the object is fully read.
String featureIdCopy = this.featureId;
String descriptionCopy = this.description;
String displayNameCopy = this.displayName;
boolean isEnabledCopy = this.isEnabled;
List<FeatureFlagFilter> featureFlagFiltersCopy = this.clientFilters;
while (reader.nextToken() != JsonToken.END_OBJECT) {
final String fieldName = reader.getFieldName();
reader.nextToken();
if (ID.equals(fieldName)) {
final String id = reader.getString();
featureIdCopy = id;
parsedProperties.put(ID, id);
} else if (DESCRIPTION.equals(fieldName)) {
final String description = reader.getString();
descriptionCopy = description;
parsedProperties.put(DESCRIPTION, description);
} else if (DISPLAY_NAME.equals(fieldName)) {
final String displayName = reader.getString();
displayNameCopy = displayName;
parsedProperties.put(DISPLAY_NAME, displayName);
} else if (ENABLED.equals(fieldName)) {
final boolean isEnabled = reader.getBoolean();
isEnabledCopy = isEnabled;
parsedProperties.put(ENABLED, isEnabled);
} else if (CONDITIONS.equals(fieldName)) {
final Conditions conditions = readConditions(reader);
if (conditions != null) {
List<FeatureFlagFilter> featureFlagFilters = conditions.getFeatureFlagFilters();
featureFlagFiltersCopy = featureFlagFilters;
parsedProperties.put(CONDITIONS, conditions);
}
} else {
// Unknown property: preserve it untyped so it survives round-tripping.
parsedProperties.put(fieldName, reader.readUntyped());
}
requiredPropertiesCopy.remove(fieldName);
}
// Commit the staged values; reaching this point means the object parsed cleanly.
this.featureId = featureIdCopy;
this.description = descriptionCopy;
this.displayName = displayNameCopy;
this.isEnabled = isEnabledCopy;
this.clientFilters = featureFlagFiltersCopy;
// True only when every required property appeared. NOTE(review): the return value of
// readObject is not consumed in this method — presumably validity is recorded by the
// caller; confirm against the call site.
return requiredPropertiesCopy.isEmpty();
});
} catch (IOException e) {
// Malformed JSON: mark the value invalid so feature-flag accessors fail fast afterwards.
isValidFeatureFlagValue = false;
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
}
}
} |
nice :+1: | public Mono<Void> flush(boolean awaitLock, boolean isClose, Context context) {
if (awaitLock) {
processingSemaphore.acquireUninterruptibly();
return Mono.using(() -> processingSemaphore, ignored -> flushLoop(isClose, context), Semaphore::release);
} else if (processingSemaphore.tryAcquire()) {
return Mono.using(() -> processingSemaphore, ignored -> flushLoop(isClose, context), Semaphore::release);
} else {
LOGGER.verbose("Batch already in-flight and not waiting for completion. Performing no-op.");
return Mono.empty();
}
} | return Mono.using(() -> processingSemaphore, ignored -> flushLoop(isClose, context), Semaphore::release); | public Mono<Void> flush(boolean awaitLock, boolean isClose, Context context) {
if (awaitLock) {
processingSemaphore.acquireUninterruptibly();
return Mono.using(() -> processingSemaphore, ignored -> flushLoop(isClose, context), Semaphore::release);
} else if (processingSemaphore.tryAcquire()) {
return Mono.using(() -> processingSemaphore, ignored -> flushLoop(isClose, context), Semaphore::release);
} else {
LOGGER.verbose("Batch already in-flight and not waiting for completion. Performing no-op.");
return Mono.empty();
}
} | class SearchIndexingPublisher<T> {
private static final double JITTER_FACTOR = 0.05;
private static final String BATCH_SIZE_SCALED_DOWN =
"Scaling down batch size due to 413 (Payload too large) response.{}Scaled down from {} to {}";
private static final ClientLogger LOGGER = new ClientLogger(SearchIndexingPublisher.class);
private final SearchIndexClientImpl restClient;
private final JsonSerializer serializer;
private final boolean autoFlush;
private int batchActionCount;
private final int maxRetries;
private final Duration throttlingDelay;
private final Duration maxThrottlingDelay;
private final Consumer<OnActionAddedOptions<T>> onActionAddedConsumer;
private final Consumer<OnActionSentOptions<T>> onActionSentConsumer;
private final Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer;
private final Consumer<OnActionErrorOptions<T>> onActionErrorConsumer;
private final Function<T, String> documentKeyRetriever;
private final Function<Integer, Integer> scaleDownFunction = size -> size / 2;
private final Object actionsMutex = new Object();
private final Deque<TryTrackingIndexAction<T>> actions = new ConcurrentLinkedDeque<>();
/*
* This queue keeps track of documents that are currently being sent to the service for indexing. This queue is
* resilient against cases where the request timeouts or is cancelled by an external operation, preventing the
* documents from being lost.
*/
private final Deque<TryTrackingIndexAction<T>> inFlightActions = new ConcurrentLinkedDeque<>();
private final Semaphore processingSemaphore = new Semaphore(1);
volatile AtomicInteger backoffCount = new AtomicInteger();
volatile Duration currentRetryDelay = Duration.ZERO;
public SearchIndexingPublisher(SearchIndexClientImpl restClient, JsonSerializer serializer,
Function<T, String> documentKeyRetriever, boolean autoFlush, int initialBatchActionCount,
int maxRetriesPerAction, Duration throttlingDelay, Duration maxThrottlingDelay,
Consumer<OnActionAddedOptions<T>> onActionAddedConsumer,
Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer,
Consumer<OnActionErrorOptions<T>> onActionErrorConsumer,
Consumer<OnActionSentOptions<T>> onActionSentConsumer) {
this.documentKeyRetriever = Objects.requireNonNull(documentKeyRetriever,
"'documentKeyRetriever' cannot be null");
this.restClient = restClient;
this.serializer = serializer;
this.autoFlush = autoFlush;
this.batchActionCount = initialBatchActionCount;
this.maxRetries = maxRetriesPerAction;
this.throttlingDelay = throttlingDelay;
this.maxThrottlingDelay = (maxThrottlingDelay.compareTo(this.throttlingDelay) < 0)
? this.throttlingDelay
: maxThrottlingDelay;
this.onActionAddedConsumer = onActionAddedConsumer;
this.onActionSentConsumer = onActionSentConsumer;
this.onActionSucceededConsumer = onActionSucceededConsumer;
this.onActionErrorConsumer = onActionErrorConsumer;
}
public synchronized Collection<IndexAction<T>> getActions() {
List<IndexAction<T>> actions = new ArrayList<>();
for (TryTrackingIndexAction<T> inFlightAction : inFlightActions) {
actions.add(inFlightAction.getAction());
}
for (TryTrackingIndexAction<T> action : this.actions) {
actions.add(action.getAction());
}
return actions;
}
public int getBatchActionCount() {
return batchActionCount;
}
public synchronized Duration getCurrentRetryDelay() {
return currentRetryDelay;
}
public synchronized Mono<Void> addActions(Collection<IndexAction<T>> actions, Context context,
Runnable rescheduleFlush) {
actions.stream()
.map(action -> new TryTrackingIndexAction<>(action, documentKeyRetriever.apply(action.getDocument())))
.forEach(action -> {
if (onActionAddedConsumer != null) {
onActionAddedConsumer.accept(new OnActionAddedOptions<>(action.getAction()));
}
this.actions.add(action);
});
LOGGER.verbose("Actions added, new pending queue size: {}.", this.actions.size());
if (autoFlush && batchAvailableForProcessing()) {
rescheduleFlush.run();
LOGGER.verbose("Adding documents triggered batch size limit, sending documents for indexing.");
return flush(false, false, context);
}
return Mono.empty();
}
private Mono<Void> flushLoop(boolean isClosed, Context context) {
return createAndProcessBatch(context)
.expand(ignored -> Flux.defer(() -> (batchAvailableForProcessing() || isClosed)
? createAndProcessBatch(context)
: Flux.empty()))
.then();
}
private Mono<IndexBatchResponse> createAndProcessBatch(Context context) {
List<TryTrackingIndexAction<T>> batchActions = createBatch();
if (CoreUtils.isNullOrEmpty(batchActions)) {
return Mono.empty();
}
List<com.azure.search.documents.implementation.models.IndexAction> convertedActions = batchActions.stream()
.map(action -> IndexActionConverter.map(action.getAction(), serializer))
.collect(Collectors.toList());
return sendBatch(convertedActions, batchActions, context)
.map(response -> {
handleResponse(batchActions, response);
return response;
});
}
private List<TryTrackingIndexAction<T>> createBatch() {
final List<TryTrackingIndexAction<T>> batchActions;
final Set<String> keysInBatch;
synchronized (actionsMutex) {
int actionSize = this.actions.size();
int inFlightActionSize = this.inFlightActions.size();
int size = Math.min(batchActionCount, actionSize + inFlightActionSize);
batchActions = new ArrayList<>(size);
keysInBatch = new HashSet<>(size * 2);
int inFlightDocumentsAdded = fillFromQueue(batchActions, inFlightActions, size, keysInBatch);
if (inFlightDocumentsAdded == size) {
reinsertFailedActions(inFlightActions);
} else {
fillFromQueue(batchActions, actions, size - inFlightDocumentsAdded, keysInBatch);
}
}
return batchActions;
}
private static <T> int fillFromQueue(List<TryTrackingIndexAction<T>> batch, Deque<TryTrackingIndexAction<T>> queue,
int requested, Set<String> duplicateKeyTracker) {
int actionsAdded = 0;
Iterator<TryTrackingIndexAction<T>> iterator = queue.iterator();
while (actionsAdded < requested && iterator.hasNext()) {
TryTrackingIndexAction<T> potentialDocumentToAdd = iterator.next();
if (duplicateKeyTracker.contains(potentialDocumentToAdd.getKey())) {
continue;
}
duplicateKeyTracker.add(potentialDocumentToAdd.getKey());
batch.add(potentialDocumentToAdd);
iterator.remove();
actionsAdded += 1;
}
return actionsAdded;
}
/*
* This may result in more than one service call in the case where the index batch is too large and we attempt to
* split it.
*/
private Mono<IndexBatchResponse> sendBatch(
List<com.azure.search.documents.implementation.models.IndexAction> actions,
List<TryTrackingIndexAction<T>> batchActions,
Context context) {
LOGGER.verbose("Sending a batch of size {}.", batchActions.size());
if (onActionSentConsumer != null) {
batchActions.forEach(action -> onActionSentConsumer.accept(new OnActionSentOptions<>(action.getAction())));
}
Mono<Response<IndexDocumentsResult>> batchCall = Utility.indexDocumentsWithResponseAsync(restClient, actions, true,
context, LOGGER);
if (!currentRetryDelay.isZero() && !currentRetryDelay.isNegative()) {
batchCall = batchCall.delaySubscription(currentRetryDelay);
}
return batchCall.map(response -> new IndexBatchResponse(response.getStatusCode(),
response.getValue().getResults(), actions.size(), false))
.doOnCancel(() -> {
LOGGER.warning("Request was cancelled before response, adding all in-flight documents back to queue.");
inFlightActions.addAll(batchActions);
})
.onErrorResume(IndexBatchException.class, exception -> Mono.just(
new IndexBatchResponse(207, exception.getIndexingResults(), actions.size(), true)))
.onErrorResume(HttpResponseException.class, exception -> {
/*
* If we received an error response where the payload was too large split it into two smaller payloads
* and attempt to index again. If the number of index actions was one raise the error as we cannot split
* that any further.
*/
int statusCode = exception.getResponse().getStatusCode();
if (statusCode == HttpURLConnection.HTTP_ENTITY_TOO_LARGE) {
/*
* Pass both the sent batch size and the configured batch size. This covers that case where the
* sent batch size was smaller than the configured batch size and a 413 was trigger.
*
* For example, by default the configured batch size defaults to 512 but a batch of 200 may be sent
* and trigger 413, if we only halved 512 we'd send the same batch again and 413 a second time.
* Instead in this scenario we should halve 200 to 100.
*/
int previousBatchSize = Math.min(batchActionCount, actions.size());
this.batchActionCount = Math.max(1, scaleDownFunction.apply(previousBatchSize));
LOGGER.verbose(BATCH_SIZE_SCALED_DOWN, System.lineSeparator(), previousBatchSize, batchActionCount);
int actionCount = actions.size();
if (actionCount == 1) {
return Mono.just(new IndexBatchResponse(statusCode, null, actionCount, true));
}
int splitOffset = Math.min(actions.size(), batchActionCount);
List<TryTrackingIndexAction<T>> batchActionsToRemove = batchActions.subList(splitOffset,
batchActions.size());
reinsertFailedActions(batchActionsToRemove);
batchActionsToRemove.clear();
return sendBatch(actions.subList(0, splitOffset), batchActions, context);
}
return Mono.just(new IndexBatchResponse(statusCode, null, actions.size(), true));
})
.onErrorResume(Exception.class, ignored ->
Mono.just(new IndexBatchResponse(0, null, actions.size(), true)));
}
private void handleResponse(List<TryTrackingIndexAction<T>> actions, IndexBatchResponse batchResponse) {
/*
* Batch has been split until it had one document in it and it returned a 413 response.
*/
if (batchResponse.getStatusCode() == HttpURLConnection.HTTP_ENTITY_TOO_LARGE && batchResponse.getCount() == 1) {
IndexAction<T> action = actions.get(0).getAction();
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action)
.setThrowable(createDocumentTooLargeException()));
}
return;
}
Deque<TryTrackingIndexAction<T>> actionsToRetry = new LinkedList<>();
boolean has503 = batchResponse.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE;
if (batchResponse.getResults() == null) {
/*
* Null results indicates that the entire request failed. Retry all documents.
*/
actionsToRetry.addAll(actions);
} else {
/*
* We got back a result set, correlate responses to their request document and add retryable actions back
* into the queue.
*/
for (IndexingResult result : batchResponse.getResults()) {
String key = result.getKey();
TryTrackingIndexAction<T> action = actions.stream()
.filter(a -> key.equals(a.getKey()))
.findFirst()
.orElse(null);
if (action == null) {
LOGGER.warning("Unable to correlate result key {} to initial document.", key);
continue;
}
if (isSuccess(result.getStatusCode())) {
if (onActionSucceededConsumer != null) {
onActionSucceededConsumer.accept(new OnActionSucceededOptions<>(action.getAction()));
}
} else if (isRetryable(result.getStatusCode())) {
has503 |= result.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE;
if (action.getTryCount() < maxRetries) {
action.incrementTryCount();
actionsToRetry.add(action);
} else {
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action.getAction())
.setThrowable(createDocumentHitRetryLimitException())
.setIndexingResult(result));
}
}
} else {
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action.getAction())
.setIndexingResult(result));
}
}
}
}
if (has503) {
currentRetryDelay = calculateRetryDelay(backoffCount.getAndIncrement());
} else {
backoffCount.set(0);
currentRetryDelay = Duration.ZERO;
}
if (!CoreUtils.isNullOrEmpty(actionsToRetry)) {
reinsertFailedActions(actionsToRetry);
}
}
private void reinsertFailedActions(Deque<TryTrackingIndexAction<T>> actionsToRetry) {
synchronized (actionsMutex) {
actionsToRetry.descendingIterator().forEachRemaining(actions::add);
}
}
private void reinsertFailedActions(List<TryTrackingIndexAction<T>> actionsToRetry) {
synchronized (actionsMutex) {
for (int i = actionsToRetry.size() - 1; i >= 0; i--) {
this.actions.push(actionsToRetry.get(i));
}
}
}
private boolean batchAvailableForProcessing() {
return (actions.size() + inFlightActions.size()) >= batchActionCount;
}
private static boolean isSuccess(int statusCode) {
return statusCode == 200 || statusCode == 201;
}
private static boolean isRetryable(int statusCode) {
return statusCode == 409 || statusCode == 422 || statusCode == 503;
}
private Duration calculateRetryDelay(int backoffCount) {
long delayWithJitterInNanos = ThreadLocalRandom.current()
.nextLong((long) (throttlingDelay.toNanos() * (1 - JITTER_FACTOR)),
(long) (throttlingDelay.toNanos() * (1 + JITTER_FACTOR)));
return Duration.ofNanos(Math.min((1L << backoffCount) * delayWithJitterInNanos, maxThrottlingDelay.toNanos()));
}
private static RuntimeException createDocumentTooLargeException() {
return new RuntimeException("Document is too large to be indexed and won't be tried again.");
}
private static RuntimeException createDocumentHitRetryLimitException() {
return new RuntimeException("Document has reached retry limit and won't be tried again.");
}
} | class SearchIndexingPublisher<T> {
private static final double JITTER_FACTOR = 0.05;
private static final String BATCH_SIZE_SCALED_DOWN =
"Scaling down batch size due to 413 (Payload too large) response.{}Scaled down from {} to {}";
private static final ClientLogger LOGGER = new ClientLogger(SearchIndexingPublisher.class);
private final SearchIndexClientImpl restClient;
private final JsonSerializer serializer;
private final boolean autoFlush;
private int batchActionCount;
private final int maxRetries;
private final Duration throttlingDelay;
private final Duration maxThrottlingDelay;
private final Consumer<OnActionAddedOptions<T>> onActionAddedConsumer;
private final Consumer<OnActionSentOptions<T>> onActionSentConsumer;
private final Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer;
private final Consumer<OnActionErrorOptions<T>> onActionErrorConsumer;
private final Function<T, String> documentKeyRetriever;
private final Function<Integer, Integer> scaleDownFunction = size -> size / 2;
private final Object actionsMutex = new Object();
private final Deque<TryTrackingIndexAction<T>> actions = new ConcurrentLinkedDeque<>();
/*
* This queue keeps track of documents that are currently being sent to the service for indexing. This queue is
* resilient against cases where the request timeouts or is cancelled by an external operation, preventing the
* documents from being lost.
*/
private final Deque<TryTrackingIndexAction<T>> inFlightActions = new ConcurrentLinkedDeque<>();
private final Semaphore processingSemaphore = new Semaphore(1);
volatile AtomicInteger backoffCount = new AtomicInteger();
volatile Duration currentRetryDelay = Duration.ZERO;
public SearchIndexingPublisher(SearchIndexClientImpl restClient, JsonSerializer serializer,
Function<T, String> documentKeyRetriever, boolean autoFlush, int initialBatchActionCount,
int maxRetriesPerAction, Duration throttlingDelay, Duration maxThrottlingDelay,
Consumer<OnActionAddedOptions<T>> onActionAddedConsumer,
Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer,
Consumer<OnActionErrorOptions<T>> onActionErrorConsumer,
Consumer<OnActionSentOptions<T>> onActionSentConsumer) {
this.documentKeyRetriever = Objects.requireNonNull(documentKeyRetriever,
"'documentKeyRetriever' cannot be null");
this.restClient = restClient;
this.serializer = serializer;
this.autoFlush = autoFlush;
this.batchActionCount = initialBatchActionCount;
this.maxRetries = maxRetriesPerAction;
this.throttlingDelay = throttlingDelay;
this.maxThrottlingDelay = (maxThrottlingDelay.compareTo(this.throttlingDelay) < 0)
? this.throttlingDelay
: maxThrottlingDelay;
this.onActionAddedConsumer = onActionAddedConsumer;
this.onActionSentConsumer = onActionSentConsumer;
this.onActionSucceededConsumer = onActionSucceededConsumer;
this.onActionErrorConsumer = onActionErrorConsumer;
}
public synchronized Collection<IndexAction<T>> getActions() {
List<IndexAction<T>> actions = new ArrayList<>();
for (TryTrackingIndexAction<T> inFlightAction : inFlightActions) {
actions.add(inFlightAction.getAction());
}
for (TryTrackingIndexAction<T> action : this.actions) {
actions.add(action.getAction());
}
return actions;
}
public int getBatchActionCount() {
return batchActionCount;
}
public synchronized Duration getCurrentRetryDelay() {
return currentRetryDelay;
}
public synchronized Mono<Void> addActions(Collection<IndexAction<T>> actions, Context context,
Runnable rescheduleFlush) {
actions.stream()
.map(action -> new TryTrackingIndexAction<>(action, documentKeyRetriever.apply(action.getDocument())))
.forEach(action -> {
if (onActionAddedConsumer != null) {
onActionAddedConsumer.accept(new OnActionAddedOptions<>(action.getAction()));
}
this.actions.add(action);
});
LOGGER.verbose("Actions added, new pending queue size: {}.", this.actions.size());
if (autoFlush && batchAvailableForProcessing()) {
rescheduleFlush.run();
LOGGER.verbose("Adding documents triggered batch size limit, sending documents for indexing.");
return flush(false, false, context);
}
return Mono.empty();
}
private Mono<Void> flushLoop(boolean isClosed, Context context) {
return createAndProcessBatch(context)
.expand(ignored -> Flux.defer(() -> (batchAvailableForProcessing() || isClosed)
? createAndProcessBatch(context)
: Flux.empty()))
.then();
}
private Mono<IndexBatchResponse> createAndProcessBatch(Context context) {
List<TryTrackingIndexAction<T>> batchActions = createBatch();
if (CoreUtils.isNullOrEmpty(batchActions)) {
return Mono.empty();
}
List<com.azure.search.documents.implementation.models.IndexAction> convertedActions = batchActions.stream()
.map(action -> IndexActionConverter.map(action.getAction(), serializer))
.collect(Collectors.toList());
return sendBatch(convertedActions, batchActions, context)
.map(response -> {
handleResponse(batchActions, response);
return response;
});
}
private List<TryTrackingIndexAction<T>> createBatch() {
final List<TryTrackingIndexAction<T>> batchActions;
final Set<String> keysInBatch;
synchronized (actionsMutex) {
int actionSize = this.actions.size();
int inFlightActionSize = this.inFlightActions.size();
int size = Math.min(batchActionCount, actionSize + inFlightActionSize);
batchActions = new ArrayList<>(size);
keysInBatch = new HashSet<>(size * 2);
int inFlightDocumentsAdded = fillFromQueue(batchActions, inFlightActions, size, keysInBatch);
if (inFlightDocumentsAdded == size) {
reinsertFailedActions(inFlightActions);
} else {
fillFromQueue(batchActions, actions, size - inFlightDocumentsAdded, keysInBatch);
}
}
return batchActions;
}
private static <T> int fillFromQueue(List<TryTrackingIndexAction<T>> batch, Deque<TryTrackingIndexAction<T>> queue,
int requested, Set<String> duplicateKeyTracker) {
int actionsAdded = 0;
Iterator<TryTrackingIndexAction<T>> iterator = queue.iterator();
while (actionsAdded < requested && iterator.hasNext()) {
TryTrackingIndexAction<T> potentialDocumentToAdd = iterator.next();
if (duplicateKeyTracker.contains(potentialDocumentToAdd.getKey())) {
continue;
}
duplicateKeyTracker.add(potentialDocumentToAdd.getKey());
batch.add(potentialDocumentToAdd);
iterator.remove();
actionsAdded += 1;
}
return actionsAdded;
}
/*
* This may result in more than one service call in the case where the index batch is too large and we attempt to
* split it.
*/
private Mono<IndexBatchResponse> sendBatch(
List<com.azure.search.documents.implementation.models.IndexAction> actions,
List<TryTrackingIndexAction<T>> batchActions,
Context context) {
LOGGER.verbose("Sending a batch of size {}.", batchActions.size());
if (onActionSentConsumer != null) {
batchActions.forEach(action -> onActionSentConsumer.accept(new OnActionSentOptions<>(action.getAction())));
}
Mono<Response<IndexDocumentsResult>> batchCall = Utility.indexDocumentsWithResponseAsync(restClient, actions, true,
context, LOGGER);
if (!currentRetryDelay.isZero() && !currentRetryDelay.isNegative()) {
batchCall = batchCall.delaySubscription(currentRetryDelay);
}
return batchCall.map(response -> new IndexBatchResponse(response.getStatusCode(),
response.getValue().getResults(), actions.size(), false))
.doOnCancel(() -> {
LOGGER.warning("Request was cancelled before response, adding all in-flight documents back to queue.");
inFlightActions.addAll(batchActions);
})
.onErrorResume(IndexBatchException.class, exception -> Mono.just(
new IndexBatchResponse(207, exception.getIndexingResults(), actions.size(), true)))
.onErrorResume(HttpResponseException.class, exception -> {
/*
* If we received an error response where the payload was too large split it into two smaller payloads
* and attempt to index again. If the number of index actions was one raise the error as we cannot split
* that any further.
*/
int statusCode = exception.getResponse().getStatusCode();
if (statusCode == HttpURLConnection.HTTP_ENTITY_TOO_LARGE) {
/*
* Pass both the sent batch size and the configured batch size. This covers that case where the
* sent batch size was smaller than the configured batch size and a 413 was trigger.
*
* For example, by default the configured batch size defaults to 512 but a batch of 200 may be sent
* and trigger 413, if we only halved 512 we'd send the same batch again and 413 a second time.
* Instead in this scenario we should halve 200 to 100.
*/
int previousBatchSize = Math.min(batchActionCount, actions.size());
this.batchActionCount = Math.max(1, scaleDownFunction.apply(previousBatchSize));
LOGGER.verbose(BATCH_SIZE_SCALED_DOWN, System.lineSeparator(), previousBatchSize, batchActionCount);
int actionCount = actions.size();
if (actionCount == 1) {
return Mono.just(new IndexBatchResponse(statusCode, null, actionCount, true));
}
int splitOffset = Math.min(actions.size(), batchActionCount);
List<TryTrackingIndexAction<T>> batchActionsToRemove = batchActions.subList(splitOffset,
batchActions.size());
reinsertFailedActions(batchActionsToRemove);
batchActionsToRemove.clear();
return sendBatch(actions.subList(0, splitOffset), batchActions, context);
}
return Mono.just(new IndexBatchResponse(statusCode, null, actions.size(), true));
})
.onErrorResume(Exception.class, ignored ->
Mono.just(new IndexBatchResponse(0, null, actions.size(), true)));
}
private void handleResponse(List<TryTrackingIndexAction<T>> actions, IndexBatchResponse batchResponse) {
/*
* Batch has been split until it had one document in it and it returned a 413 response.
*/
if (batchResponse.getStatusCode() == HttpURLConnection.HTTP_ENTITY_TOO_LARGE && batchResponse.getCount() == 1) {
IndexAction<T> action = actions.get(0).getAction();
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action)
.setThrowable(createDocumentTooLargeException()));
}
return;
}
Deque<TryTrackingIndexAction<T>> actionsToRetry = new LinkedList<>();
boolean has503 = batchResponse.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE;
if (batchResponse.getResults() == null) {
/*
* Null results indicates that the entire request failed. Retry all documents.
*/
actionsToRetry.addAll(actions);
} else {
/*
* We got back a result set, correlate responses to their request document and add retryable actions back
* into the queue.
*/
for (IndexingResult result : batchResponse.getResults()) {
String key = result.getKey();
TryTrackingIndexAction<T> action = actions.stream()
.filter(a -> key.equals(a.getKey()))
.findFirst()
.orElse(null);
if (action == null) {
LOGGER.warning("Unable to correlate result key {} to initial document.", key);
continue;
}
if (isSuccess(result.getStatusCode())) {
if (onActionSucceededConsumer != null) {
onActionSucceededConsumer.accept(new OnActionSucceededOptions<>(action.getAction()));
}
} else if (isRetryable(result.getStatusCode())) {
has503 |= result.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE;
if (action.getTryCount() < maxRetries) {
action.incrementTryCount();
actionsToRetry.add(action);
} else {
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action.getAction())
.setThrowable(createDocumentHitRetryLimitException())
.setIndexingResult(result));
}
}
} else {
if (onActionErrorConsumer != null) {
onActionErrorConsumer.accept(new OnActionErrorOptions<>(action.getAction())
.setIndexingResult(result));
}
}
}
}
if (has503) {
currentRetryDelay = calculateRetryDelay(backoffCount.getAndIncrement());
} else {
backoffCount.set(0);
currentRetryDelay = Duration.ZERO;
}
if (!CoreUtils.isNullOrEmpty(actionsToRetry)) {
reinsertFailedActions(actionsToRetry);
}
}
private void reinsertFailedActions(Deque<TryTrackingIndexAction<T>> actionsToRetry) {
synchronized (actionsMutex) {
actionsToRetry.descendingIterator().forEachRemaining(actions::add);
}
}
/**
 * Reinserts actions that failed with a retryable status back into the pending queue.
 *
 * <p>Retried actions land at the front of the queue in their original relative order, so they
 * are processed before newly-added documents.
 *
 * @param actionsToRetry Actions that failed with a retryable status and still have retries left.
 */
private void reinsertFailedActions(List<TryTrackingIndexAction<T>> actionsToRetry) {
    synchronized (actionsMutex) {
        // Walk the list back-to-front; each push lands in front of the previous one,
        // which restores the original ordering at the head of the queue.
        for (int index = actionsToRetry.size(); index-- > 0;) {
            this.actions.push(actionsToRetry.get(index));
        }
    }
}
/**
 * Determines whether enough documents are available (pending plus in-flight) to fill a batch.
 *
 * @return {@code true} if at least {@code batchActionCount} actions are available.
 */
private boolean batchAvailableForProcessing() {
    int pendingCount = actions.size();
    int inFlightCount = inFlightActions.size();
    return pendingCount + inFlightCount >= batchActionCount;
}
/**
 * Determines whether an indexing result status code indicates success.
 *
 * <p>Uses the same {@link HttpURLConnection} status constants as the batch-response handling
 * above (which compares against {@code HTTP_UNAVAILABLE}) instead of bare integer literals.
 *
 * @param statusCode Per-document HTTP status code returned by the service.
 * @return {@code true} for 200 (OK) or 201 (Created).
 */
private static boolean isSuccess(int statusCode) {
    return statusCode == HttpURLConnection.HTTP_OK || statusCode == HttpURLConnection.HTTP_CREATED;
}
/**
 * Determines whether an indexing result status code indicates a failure worth retrying.
 *
 * <p>Uses the same {@link HttpURLConnection} status constants as the batch-response handling
 * above where they exist; 422 (Unprocessable Entity) has no constant in
 * {@link HttpURLConnection}, so the literal is kept.
 *
 * @param statusCode Per-document HTTP status code returned by the service.
 * @return {@code true} for 409 (Conflict), 422 (Unprocessable Entity), or 503 (Service
 * Unavailable).
 */
private static boolean isRetryable(int statusCode) {
    return statusCode == HttpURLConnection.HTTP_CONFLICT
        || statusCode == 422
        || statusCode == HttpURLConnection.HTTP_UNAVAILABLE;
}
/**
 * Calculates the exponential backoff delay applied after a throttled (503) batch response.
 *
 * <p>A random jitter of +/- {@code JITTER_FACTOR} is applied to the base throttling delay so
 * concurrent publishers don't retry in lockstep, then the jittered delay is doubled per
 * consecutive throttled batch and capped at {@code maxThrottlingDelay}.
 *
 * <p>The shift-and-multiply is guarded against {@code long} overflow: previously
 * {@code (1L << backoffCount) * delayWithJitterInNanos} could wrap negative for large backoff
 * counts, producing a negative delay. Any overflow now resolves to the configured cap.
 *
 * @param backoffCount Number of consecutive throttled batches (incremented by the caller).
 * @return The capped, jittered backoff delay.
 */
private Duration calculateRetryDelay(int backoffCount) {
    long delayWithJitterInNanos = ThreadLocalRandom.current()
        .nextLong((long) (throttlingDelay.toNanos() * (1 - JITTER_FACTOR)),
            (long) (throttlingDelay.toNanos() * (1 + JITTER_FACTOR)));
    long cappedNanos = maxThrottlingDelay.toNanos();
    long backoffNanos;
    if (backoffCount >= 62) {
        // 1L << 62 already exceeds any realistic delay; shifting further would wrap.
        backoffNanos = cappedNanos;
    } else {
        try {
            backoffNanos = Math.min(Math.multiplyExact(1L << backoffCount, delayWithJitterInNanos), cappedNanos);
        } catch (ArithmeticException ignored) {
            // Uncapped delay exceeded Long.MAX_VALUE nanoseconds; clamp to the cap.
            backoffNanos = cappedNanos;
        }
    }
    return Duration.ofNanos(backoffNanos);
}
/**
 * Creates the exception reported for a document that exceeds the indexable size limit.
 * Such documents are dropped rather than retried.
 *
 * @return A {@link RuntimeException} describing the non-retryable failure.
 */
private static RuntimeException createDocumentTooLargeException() {
    final String message = "Document is too large to be indexed and won't be tried again.";
    return new RuntimeException(message);
}
/**
 * Creates the exception reported for a document that exhausted its retry budget.
 * Such documents are dropped rather than retried again.
 *
 * @return A {@link RuntimeException} describing the non-retryable failure.
 */
private static RuntimeException createDocumentHitRetryLimitException() {
    final String message = "Document has reached retry limit and won't be tried again.";
    return new RuntimeException(message);
}
} |
Should there be a latch wait before calling close()? Alternatively, use _Mono.using_ plus subscribe so the sender is closed only after the send completes. | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | sender.close(); | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
sender.sendMessage(new ServiceBusMessage("Foo bar"));
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("test-1"),
new ServiceBusMessage("test-2"));
ServiceBusMessageBatch batch = sender.createMessageBatch();
for (ServiceBusMessage message : messages) {
if (batch.tryAddMessage(message)) {
continue;
}
sender.sendMessages(batch);
batch = sender.createMessageBatch();
if (!batch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
if (batch.getCount() > 0) {
sender.sendMessages(batch);
}
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.createMessageBatch().flatMap(batch -> {
batch.tryAddMessage(new ServiceBusMessage("test-1"));
batch.tryAddMessage(new ServiceBusMessage("test-2"));
return asyncSender.sendMessages(batch);
}).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred while sending batch:" + error);
}, () -> {
System.out.println("Send complete.");
});
asyncSender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if an message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildClient();
ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
thirdMessage.getApplicationProperties().put("telemetry", "fps");
List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
for (ServiceBusMessage message : telemetryMessages) {
if (!currentBatch.tryAddMessage(message)) {
sender.sendMessages(currentBatch);
currentBatch = sender.createMessageBatch(options);
if (!currentBatch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
}
if (currentBatch.getCount() > 0) {
sender.sendMessages(currentBatch);
}
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
ServiceBusMessageBatch batch = currentBatch.get();
if (batch == null) {
return asyncSender.createMessageBatch(options);
}
if (batch.getCount() > 0) {
return asyncSender.sendMessages(batch).then(
asyncSender.createMessageBatch(options)
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
if (!currentBatch.compareAndSet(batch, newBatch)) {
sink.error(new IllegalStateException(
"Expected that the object in currentBatch was batch. But it is not."));
} else {
sink.next(newBatch);
}
}));
} else {
return Mono.just(batch);
}
});
Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
if (batch.tryAddMessage(message)) {
return Mono.empty();
} else {
return sendBatchAndGetCurrentBatchOperation
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
if (!newBatch.tryAddMessage(message)) {
sink.error(new IllegalArgumentException(
"Message is too large to fit in an empty batch."));
} else {
sink.complete();
}
});
}
});
});
Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
.subscribe(batch -> {
System.out.println("Last batch should be empty: " + batch.getCount());
}, error -> {
System.err.println("Error sending telemetry messages: " + error);
}, () -> {
System.out.println("Completed.");
asyncSender.close();
});
disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
sender.close();
}
/**
* Create a session message.
*/
@Test
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
sender.sendMessage(new ServiceBusMessage("Foo bar"));
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("test-1"),
new ServiceBusMessage("test-2"));
ServiceBusMessageBatch batch = sender.createMessageBatch();
for (ServiceBusMessage message : messages) {
if (batch.tryAddMessage(message)) {
continue;
}
sender.sendMessages(batch);
batch = sender.createMessageBatch();
if (!batch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
if (batch.getCount() > 0) {
sender.sendMessages(batch);
}
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.createMessageBatch().flatMap(batch -> {
batch.tryAddMessage(new ServiceBusMessage("test-1"));
batch.tryAddMessage(new ServiceBusMessage("test-2"));
return asyncSender.sendMessages(batch);
}).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred while sending batch:" + error);
}, () -> {
System.out.println("Send complete.");
});
asyncSender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if an message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildClient();
ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
thirdMessage.getApplicationProperties().put("telemetry", "fps");
List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
for (ServiceBusMessage message : telemetryMessages) {
if (!currentBatch.tryAddMessage(message)) {
sender.sendMessages(currentBatch);
currentBatch = sender.createMessageBatch(options);
if (!currentBatch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
}
if (currentBatch.getCount() > 0) {
sender.sendMessages(currentBatch);
}
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
ServiceBusMessageBatch batch = currentBatch.get();
if (batch == null) {
return asyncSender.createMessageBatch(options);
}
if (batch.getCount() > 0) {
return asyncSender.sendMessages(batch).then(
asyncSender.createMessageBatch(options)
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
if (!currentBatch.compareAndSet(batch, newBatch)) {
sink.error(new IllegalStateException(
"Expected that the object in currentBatch was batch. But it is not."));
} else {
sink.next(newBatch);
}
}));
} else {
return Mono.just(batch);
}
});
Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
if (batch.tryAddMessage(message)) {
return Mono.empty();
} else {
return sendBatchAndGetCurrentBatchOperation
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
if (!newBatch.tryAddMessage(message)) {
sink.error(new IllegalArgumentException(
"Message is too large to fit in an empty batch."));
} else {
sink.complete();
}
});
}
});
});
Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
.subscribe(batch -> {
System.out.println("Last batch should be empty: " + batch.getCount());
}, error -> {
System.err.println("Error sending telemetry messages: " + error);
}, () -> {
System.out.println("Completed.");
asyncSender.close();
});
disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
sender.close();
}
/**
* Create a session message.
*/
@Test
} |
I wanted to show usage samples and remind users to close the sender. This isn't meant to be runnable code, but I will add some comments. | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | sender.close(); | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
sender.sendMessage(new ServiceBusMessage("Foo bar"));
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("test-1"),
new ServiceBusMessage("test-2"));
ServiceBusMessageBatch batch = sender.createMessageBatch();
for (ServiceBusMessage message : messages) {
if (batch.tryAddMessage(message)) {
continue;
}
sender.sendMessages(batch);
batch = sender.createMessageBatch();
if (!batch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
if (batch.getCount() > 0) {
sender.sendMessages(batch);
}
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.createMessageBatch().flatMap(batch -> {
batch.tryAddMessage(new ServiceBusMessage("test-1"));
batch.tryAddMessage(new ServiceBusMessage("test-2"));
return asyncSender.sendMessages(batch);
}).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred while sending batch:" + error);
}, () -> {
System.out.println("Send complete.");
});
asyncSender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if an message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildClient();
ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
thirdMessage.getApplicationProperties().put("telemetry", "fps");
List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
for (ServiceBusMessage message : telemetryMessages) {
if (!currentBatch.tryAddMessage(message)) {
sender.sendMessages(currentBatch);
currentBatch = sender.createMessageBatch(options);
if (!currentBatch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
}
if (currentBatch.getCount() > 0) {
sender.sendMessages(currentBatch);
}
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
ServiceBusMessageBatch batch = currentBatch.get();
if (batch == null) {
return asyncSender.createMessageBatch(options);
}
if (batch.getCount() > 0) {
return asyncSender.sendMessages(batch).then(
asyncSender.createMessageBatch(options)
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
if (!currentBatch.compareAndSet(batch, newBatch)) {
sink.error(new IllegalStateException(
"Expected that the object in currentBatch was batch. But it is not."));
} else {
sink.next(newBatch);
}
}));
} else {
return Mono.just(batch);
}
});
Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
if (batch.tryAddMessage(message)) {
return Mono.empty();
} else {
return sendBatchAndGetCurrentBatchOperation
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
if (!newBatch.tryAddMessage(message)) {
sink.error(new IllegalArgumentException(
"Message is too large to fit in an empty batch."));
} else {
sink.complete();
}
});
}
});
});
Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
.subscribe(batch -> {
System.out.println("Last batch should be empty: " + batch.getCount());
}, error -> {
System.err.println("Error sending telemetry messages: " + error);
}, () -> {
System.out.println("Completed.");
asyncSender.close();
});
disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
sender.close();
}
/**
* Create a session message.
*/
@Test
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
sender.sendMessage(new ServiceBusMessage("Foo bar"));
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, credential)
.sender()
.queueName(queueName)
.buildClient();
List<ServiceBusMessage> messages = Arrays.asList(
new ServiceBusMessage("test-1"),
new ServiceBusMessage("test-2"));
ServiceBusMessageBatch batch = sender.createMessageBatch();
for (ServiceBusMessage message : messages) {
if (batch.tryAddMessage(message)) {
continue;
}
sender.sendMessages(batch);
batch = sender.createMessageBatch();
if (!batch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
if (batch.getCount() > 0) {
sender.sendMessages(batch);
}
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
asyncSender.createMessageBatch().flatMap(batch -> {
batch.tryAddMessage(new ServiceBusMessage("test-1"));
batch.tryAddMessage(new ServiceBusMessage("test-2"));
return asyncSender.sendMessages(batch);
}).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred while sending batch:" + error);
}, () -> {
System.out.println("Send complete.");
});
asyncSender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if an message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildClient();
ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
thirdMessage.getApplicationProperties().put("telemetry", "fps");
List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
for (ServiceBusMessage message : telemetryMessages) {
if (!currentBatch.tryAddMessage(message)) {
sender.sendMessages(currentBatch);
currentBatch = sender.createMessageBatch(options);
if (!currentBatch.tryAddMessage(message)) {
throw new IllegalArgumentException("Message is too large for an empty batch.");
}
}
}
if (currentBatch.getCount() > 0) {
sender.sendMessages(currentBatch);
}
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(queueName)
.buildAsyncClient();
ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
firstMessage.getApplicationProperties().put("telemetry", "latency");
ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
CreateMessageBatchOptions options = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(256);
AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
ServiceBusMessageBatch batch = currentBatch.get();
if (batch == null) {
return asyncSender.createMessageBatch(options);
}
if (batch.getCount() > 0) {
return asyncSender.sendMessages(batch).then(
asyncSender.createMessageBatch(options)
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
if (!currentBatch.compareAndSet(batch, newBatch)) {
sink.error(new IllegalStateException(
"Expected that the object in currentBatch was batch. But it is not."));
} else {
sink.next(newBatch);
}
}));
} else {
return Mono.just(batch);
}
});
Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
if (batch.tryAddMessage(message)) {
return Mono.empty();
} else {
return sendBatchAndGetCurrentBatchOperation
.handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
if (!newBatch.tryAddMessage(message)) {
sink.error(new IllegalArgumentException(
"Message is too large to fit in an empty batch."));
} else {
sink.complete();
}
});
}
});
});
Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
.subscribe(batch -> {
System.out.println("Last batch should be empty: " + batch.getCount());
}, error -> {
System.err.println("Error sending telemetry messages: " + error);
}, () -> {
System.out.println("Completed.");
asyncSender.close();
});
disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
ServiceBusSenderClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message);
sender.close();
}
/**
* Create a session message.
*/
@Test
} |
I see, sounds good | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | sender.close(); | public void sendSessionMessageAsync() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
.sender()
.queueName(sessionEnabledQueueName)
.buildAsyncClient();
ServiceBusMessage message = new ServiceBusMessage("Hello world")
.setSessionId("greetings");
sender.sendMessage(message).subscribe(unused -> {
}, error -> {
System.err.println("Error occurred publishing batch: " + error);
}, () -> {
System.out.println("Send complete.");
});
sender.close();
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
    // Build a synchronous sender over the sample queue, authenticating with AAD.
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildClient();
    // Publish a single message, then release the underlying AMQP connection.
    sender.sendMessage(new ServiceBusMessage("Foo bar"));
    sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
    // Build an asynchronous sender over the sample queue, authenticating with AAD.
    ServiceBusSenderAsyncClient client = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    // Release the underlying AMQP connection.
    client.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
    TokenCredential credential = new DefaultAzureCredentialBuilder().build();
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, credential)
        .sender()
        .queueName(queueName)
        .buildClient();
    try {
        List<ServiceBusMessage> messages = Arrays.asList(
            new ServiceBusMessage("test-1"),
            new ServiceBusMessage("test-2"));
        ServiceBusMessageBatch batch = sender.createMessageBatch();
        for (ServiceBusMessage message : messages) {
            // tryAddMessage returns false when the message does not fit in the batch.
            if (batch.tryAddMessage(message)) {
                continue;
            }
            // Current batch is full: flush it and start a fresh one for this message.
            sender.sendMessages(batch);
            batch = sender.createMessageBatch();
            if (!batch.tryAddMessage(message)) {
                throw new IllegalArgumentException("Message is too large for an empty batch.");
            }
        }
        // Flush any messages left in the final batch.
        if (batch.getCount() > 0) {
            sender.sendMessages(batch);
        }
    } finally {
        // Always release the sender, even when sending throws (this method documents
        // @throws IllegalArgumentException); the original leaked the client on that path.
        sender.close();
    }
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
    ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    asyncSender.createMessageBatch().flatMap(batch -> {
        // tryAddMessage returns false when a message does not fit; the original ignored
        // that result and could silently drop messages. Surface it as an error instead.
        if (!batch.tryAddMessage(new ServiceBusMessage("test-1"))
            || !batch.tryAddMessage(new ServiceBusMessage("test-2"))) {
            throw new IllegalArgumentException("Message is too large for an empty batch.");
        }
        return asyncSender.sendMessages(batch);
    }).subscribe(unused -> {
    }, error -> {
        System.err.println("Error occurred while sending batch:" + error);
        // Close only after the pipeline terminates; closing on the calling thread right
        // after subscribe (as the original did) can tear down the client mid-send.
        asyncSender.close();
    }, () -> {
        System.out.println("Send complete.");
        asyncSender.close();
    });
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if a message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(sessionEnabledQueueName == null ? queueName : queueName)
        .buildClient();
    try {
        ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
        firstMessage.getApplicationProperties().put("telemetry", "latency");
        ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
        secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
        ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
        thirdMessage.getApplicationProperties().put("telemetry", "fps");
        List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
        // Cap each batch at 256 bytes so the fill-and-flush path below is exercised.
        CreateMessageBatchOptions options = new CreateMessageBatchOptions()
            .setMaximumSizeInBytes(256);
        ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
        for (ServiceBusMessage message : telemetryMessages) {
            if (!currentBatch.tryAddMessage(message)) {
                // Batch is full: send it and retry the message against a fresh batch.
                sender.sendMessages(currentBatch);
                currentBatch = sender.createMessageBatch(options);
                if (!currentBatch.tryAddMessage(message)) {
                    throw new IllegalArgumentException("Message is too large for an empty batch.");
                }
            }
        }
        // Flush whatever remains in the last batch.
        if (currentBatch.getCount() > 0) {
            sender.sendMessages(currentBatch);
        }
    } finally {
        // Always release the sender, even when the too-large exception above is thrown;
        // the original leaked the client on that path.
        sender.close();
    }
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
    ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
    firstMessage.getApplicationProperties().put("telemetry", "latency");
    ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
    secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
    Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
    // Cap each batch at 256 bytes so the fill-and-flush logic below is exercised.
    CreateMessageBatchOptions options = new CreateMessageBatchOptions()
        .setMaximumSizeInBytes(256);
    AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
    // Deferred so each subscription re-evaluates: sends the current batch when it holds
    // messages and CAS-swaps a fresh batch into currentBatch; otherwise reuses/creates one.
    Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
        ServiceBusMessageBatch batch = currentBatch.get();
        if (batch == null) {
            // NOTE(review): the freshly created batch is never stored into currentBatch
            // here, so currentBatch stays null until the CAS branch runs — confirm this
            // is the intended seeding behavior.
            return asyncSender.createMessageBatch(options);
        }
        if (batch.getCount() > 0) {
            return asyncSender.sendMessages(batch).then(
                asyncSender.createMessageBatch(options)
                    .handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
                        // CAS guards against a concurrent swap of the batch reference.
                        if (!currentBatch.compareAndSet(batch, newBatch)) {
                            sink.error(new IllegalStateException(
                                "Expected that the object in currentBatch was batch. But it is not."));
                        } else {
                            sink.next(newBatch);
                        }
                    }));
        } else {
            return Mono.just(batch);
        }
    });
    // For each message: try to add it to the current batch; on overflow, flush and retry
    // once against a fresh batch, failing if it still does not fit.
    Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
        return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
            if (batch.tryAddMessage(message)) {
                return Mono.empty();
            } else {
                return sendBatchAndGetCurrentBatchOperation
                    .handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
                        if (!newBatch.tryAddMessage(message)) {
                            sink.error(new IllegalArgumentException(
                                "Message is too large to fit in an empty batch."));
                        } else {
                            sink.complete();
                        }
                    });
            }
        });
    });
    Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
        .subscribe(batch -> {
            System.out.println("Last batch should be empty: " + batch.getCount());
        }, error -> {
            System.err.println("Error sending telemetry messages: " + error);
        }, () -> {
            System.out.println("Completed.");
            asyncSender.close();
        });
    // NOTE(review): dispose() runs immediately on the calling thread and cancels the
    // subscription, likely before the async pipeline finishes — confirm intended.
    disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
    // A session-enabled queue requires every message to carry a session id.
    ServiceBusMessage greeting = new ServiceBusMessage("Hello world")
        .setSessionId("greetings");
    ServiceBusSenderClient sessionSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(sessionEnabledQueueName)
        .buildClient();
    sessionSender.sendMessage(greeting);
    sessionSender.close();
}
/**
* Create a session message.
*/
@Test
} | class ServiceBusSenderClientJavaDocCodeSamples {
/**
* Fully qualified namespace is the host name of the Service Bus resource. It can be found by navigating to the
* Service Bus namespace and looking in the "Essentials" panel.
*/
private final String fullyQualifiedNamespace = System.getenv("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
private final String queueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_QUEUE_NAME");
/**
* Name of a session-enabled queue in the Service Bus namespace.
*/
private final String sessionEnabledQueueName = System.getenv("AZURE_SERVICEBUS_SAMPLE_SESSION_QUEUE_NAME");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderClient}.
*/
@Test
public void instantiate() {
    // Authenticate with AAD and build a synchronous sender for the sample queue.
    TokenCredential credential = new DefaultAzureCredentialBuilder().build();
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, credential)
        .sender()
        .queueName(queueName)
        .buildClient();
    // Publish one message, then release the underlying AMQP connection.
    sender.sendMessage(new ServiceBusMessage("Foo bar"));
    sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
@Test
public void instantiateAsync() {
    // Authenticate with AAD and build an asynchronous sender for the sample queue.
    TokenCredential credential = new DefaultAzureCredentialBuilder().build();
    ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, credential)
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    // No messages are sent in this snippet; just release the connection.
    asyncSender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*
* @throws IllegalArgumentException if a message is too large.
*/
@Test
public void sendBatch() {
    TokenCredential credential = new DefaultAzureCredentialBuilder().build();
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, credential)
        .sender()
        .queueName(queueName)
        .buildClient();
    List<ServiceBusMessage> messages = Arrays.asList(
        new ServiceBusMessage("test-1"),
        new ServiceBusMessage("test-2"));
    ServiceBusMessageBatch batch = sender.createMessageBatch();
    for (ServiceBusMessage message : messages) {
        // tryAddMessage returns false when the message does not fit in the batch.
        if (batch.tryAddMessage(message)) {
            continue;
        }
        // Batch is full: flush it and retry the message against a fresh batch.
        sender.sendMessages(batch);
        batch = sender.createMessageBatch();
        if (!batch.tryAddMessage(message)) {
            throw new IllegalArgumentException("Message is too large for an empty batch.");
        }
    }
    // Flush any messages left in the final batch.
    if (batch.getCount() > 0) {
        sender.sendMessages(batch);
    }
    // NOTE(review): if sendMessages or the throw above fires, the sender is never
    // closed — consider try/finally.
    sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
@Test
public void sendBatchAsync() {
    ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    asyncSender.createMessageBatch().flatMap(batch -> {
        // NOTE(review): tryAddMessage's boolean result is ignored here; a message that
        // does not fit is silently dropped — confirm intended for this sample.
        batch.tryAddMessage(new ServiceBusMessage("test-1"));
        batch.tryAddMessage(new ServiceBusMessage("test-2"));
        return asyncSender.sendMessages(batch);
    }).subscribe(unused -> {
    }, error -> {
        System.err.println("Error occurred while sending batch:" + error);
    }, () -> {
        System.out.println("Send complete.");
    });
    // NOTE(review): close() runs on the calling thread right after subscribe(), possibly
    // before the asynchronous send completes — confirm intended.
    asyncSender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*
* @throws IllegalArgumentException if a message is too large for an empty batch.
*/
@Test
public void batchSizeLimited() {
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildClient();
    ServiceBusMessage firstMessage = new ServiceBusMessage("message-1");
    firstMessage.getApplicationProperties().put("telemetry", "latency");
    ServiceBusMessage secondMessage = new ServiceBusMessage("message-2");
    secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
    ServiceBusMessage thirdMessage = new ServiceBusMessage("message-3");
    thirdMessage.getApplicationProperties().put("telemetry", "fps");
    List<ServiceBusMessage> telemetryMessages = Arrays.asList(firstMessage, secondMessage, thirdMessage);
    // Cap each batch at 256 bytes so the fill-and-flush path below is exercised.
    CreateMessageBatchOptions options = new CreateMessageBatchOptions()
        .setMaximumSizeInBytes(256);
    ServiceBusMessageBatch currentBatch = sender.createMessageBatch(options);
    for (ServiceBusMessage message : telemetryMessages) {
        if (!currentBatch.tryAddMessage(message)) {
            // Batch is full: send it and retry the message against a fresh batch.
            sender.sendMessages(currentBatch);
            currentBatch = sender.createMessageBatch(options);
            if (!currentBatch.tryAddMessage(message)) {
                throw new IllegalArgumentException("Message is too large for an empty batch.");
            }
        }
    }
    // Flush whatever remains in the last batch.
    if (currentBatch.getCount() > 0) {
        sender.sendMessages(currentBatch);
    }
    // NOTE(review): the sender is not closed when the too-large exception above is
    // thrown — consider try/finally.
    sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
@Test
public void batchSizeLimitedAsync() {
    ServiceBusSenderAsyncClient asyncSender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(queueName)
        .buildAsyncClient();
    ServiceBusMessage firstMessage = new ServiceBusMessage(BinaryData.fromBytes("92".getBytes(UTF_8)));
    firstMessage.getApplicationProperties().put("telemetry", "latency");
    ServiceBusMessage secondMessage = new ServiceBusMessage(BinaryData.fromBytes("98".getBytes(UTF_8)));
    secondMessage.getApplicationProperties().put("telemetry", "cpu-temperature");
    Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
    // Cap each batch at 256 bytes so the fill-and-flush logic below is exercised.
    CreateMessageBatchOptions options = new CreateMessageBatchOptions()
        .setMaximumSizeInBytes(256);
    AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>();
    // Deferred so each subscription re-evaluates: sends the current batch when it holds
    // messages and CAS-swaps a fresh batch into currentBatch; otherwise reuses/creates one.
    Mono<ServiceBusMessageBatch> sendBatchAndGetCurrentBatchOperation = Mono.defer(() -> {
        ServiceBusMessageBatch batch = currentBatch.get();
        if (batch == null) {
            // NOTE(review): the freshly created batch is never stored into currentBatch
            // here, so currentBatch stays null until the CAS branch runs — confirm this
            // is the intended seeding behavior.
            return asyncSender.createMessageBatch(options);
        }
        if (batch.getCount() > 0) {
            return asyncSender.sendMessages(batch).then(
                asyncSender.createMessageBatch(options)
                    .handle((ServiceBusMessageBatch newBatch, SynchronousSink<ServiceBusMessageBatch> sink) -> {
                        // CAS guards against a concurrent swap of the batch reference.
                        if (!currentBatch.compareAndSet(batch, newBatch)) {
                            sink.error(new IllegalStateException(
                                "Expected that the object in currentBatch was batch. But it is not."));
                        } else {
                            sink.next(newBatch);
                        }
                    }));
        } else {
            return Mono.just(batch);
        }
    });
    // For each message: try to add it to the current batch; on overflow, flush and retry
    // once against a fresh batch, failing if it still does not fit.
    Flux<Void> sendMessagesOperation = telemetryMessages.flatMap(message -> {
        return sendBatchAndGetCurrentBatchOperation.flatMap(batch -> {
            if (batch.tryAddMessage(message)) {
                return Mono.empty();
            } else {
                return sendBatchAndGetCurrentBatchOperation
                    .handle((ServiceBusMessageBatch newBatch, SynchronousSink<Void> sink) -> {
                        if (!newBatch.tryAddMessage(message)) {
                            sink.error(new IllegalArgumentException(
                                "Message is too large to fit in an empty batch."));
                        } else {
                            sink.complete();
                        }
                    });
            }
        });
    });
    Disposable disposable = sendMessagesOperation.then(sendBatchAndGetCurrentBatchOperation)
        .subscribe(batch -> {
            System.out.println("Last batch should be empty: " + batch.getCount());
        }, error -> {
            System.err.println("Error sending telemetry messages: " + error);
        }, () -> {
            System.out.println("Completed.");
            asyncSender.close();
        });
    // NOTE(review): dispose() runs immediately on the calling thread and cancels the
    // subscription, likely before the async pipeline finishes — confirm intended.
    disposable.dispose();
}
/**
* Create a session message.
*/
@Test
public void sendSessionMessage() {
    ServiceBusSenderClient sender = new ServiceBusClientBuilder()
        .credential(fullyQualifiedNamespace, new DefaultAzureCredentialBuilder().build())
        .sender()
        .queueName(sessionEnabledQueueName)
        .buildClient();
    // A session-enabled queue requires every message to carry a session id.
    ServiceBusMessage message = new ServiceBusMessage("Hello world")
        .setSessionId("greetings");
    sender.sendMessage(message);
    sender.close();
}
/**
* Create a session message.
*/
@Test
} |
This can throw NPE if `ttlSeconds` is not initialized. We should add a null check and return null if uninitialized. With this change, you can also remove the spotbugs suppression. | public Duration getTimeToLive() {
return Duration.ofSeconds(ttlSeconds.longValue());
} | return Duration.ofSeconds(ttlSeconds.longValue()); | public Duration getTimeToLive() {
return ttlSeconds == null ? null : Duration.ofSeconds(ttlSeconds.longValue());
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
/**
* Set the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @param ttlSeconds the ttlSeconds value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setTimeToLive(Duration ttlSeconds) {
    // Null-guard: the original called ttlSeconds.getSeconds() unconditionally and threw
    // NPE for a null argument. A null time-to-live simply leaves the field unset.
    if (ttlSeconds != null) {
        this.ttlSeconds = (float) ttlSeconds.getSeconds();
    }
    return this;
}
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
/**
* Set the timeToLive property: Router Job Worker Selector Time to Live in Seconds.
*
* @param timeToLive the timeToLive value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setTimeToLive(Duration timeToLive) {
    // Null-safe: only store when a value is provided; getSeconds() on null would NPE.
    // The wire format carries whole seconds as a float, so sub-second precision is lost.
    if (timeToLive != null) {
        this.ttlSeconds = (float) timeToLive.getSeconds();
    }
    return this;
}
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} |
Is this conversion needed? Can the swagger be updated to use the [common error type](https://github.com/Azure/azure-rest-api-specs/blob/main/specification/common-types/data-plane/v1/types.json#L48) which will handle the mapping to ResponseError? | public AcsRouterJobClassificationFailedEventData setErrors(List<ResponseError> errors) {
this.errors =
errors.stream()
.map(e -> new AcsRouterCommunicationError().setCode(e.getCode()).setMessage(e.getMessage()))
.collect(Collectors.toList());
return this;
} | return this; | public AcsRouterJobClassificationFailedEventData setErrors(List<ResponseError> errors) {
this.errors =
errors.stream()
.map(e -> new AcsRouterCommunicationError().setCode(e.getCode()).setMessage(e.getMessage()))
.collect(Collectors.toList());
return this;
} | class AcsRouterJobClassificationFailedEventData extends AcsRouterJobEventData {
/*
* Router Job Classification Policy Id
*/
@JsonProperty(value = "classificationPolicyId")
private String classificationPolicyId;
/*
* Router Job Classification Failed Errors
*/
@JsonProperty(value = "errors")
private List<AcsRouterCommunicationError> errors;
/** Creates an instance of AcsRouterJobClassificationFailedEventData class. */
public AcsRouterJobClassificationFailedEventData() {}
/**
* Get the classificationPolicyId property: Router Job Classification Policy Id.
*
* @return the classificationPolicyId value.
*/
public String getClassificationPolicyId() {
return this.classificationPolicyId;
}
/**
* Set the classificationPolicyId property: Router Job Classification Policy Id.
*
* @param classificationPolicyId the classificationPolicyId value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
public AcsRouterJobClassificationFailedEventData setClassificationPolicyId(String classificationPolicyId) {
this.classificationPolicyId = classificationPolicyId;
return this;
}
/**
* Get the errors property: Router Job Classification Failed Errors.
*
* @return the errors value.
*/
public List<ResponseError> getErrors() {
    // Null-guard: 'errors' is null until populated, and the original dereferenced it
    // unconditionally (NPE). Preserve "not set" by returning null in that case.
    if (this.errors == null) {
        return null;
    }
    // Convert the wire-format AcsRouterCommunicationError entries into ResponseError.
    return this.errors.stream()
        .map(e -> new ResponseError(e.getCode(), e.getMessage()))
        .collect(Collectors.toList());
}
/**
* Set the errors property: Router Job Classification Failed Errors.
*
* @param errors the errors value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setQueueId(String queueId) {
super.setQueueId(queueId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setLabels(Map<String, String> labels) {
super.setLabels(labels);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setJobId(String jobId) {
super.setJobId(jobId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelReference(String channelReference) {
super.setChannelReference(channelReference);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelId(String channelId) {
super.setChannelId(channelId);
return this;
}
} | class AcsRouterJobClassificationFailedEventData extends AcsRouterJobEventData {
/*
* Router Job Classification Policy Id
*/
@JsonProperty(value = "classificationPolicyId")
private String classificationPolicyId;
/*
* Router Job Classification Failed Errors
*/
@JsonProperty(value = "errors")
private List<AcsRouterCommunicationError> errors;
/** Creates an instance of AcsRouterJobClassificationFailedEventData class. */
public AcsRouterJobClassificationFailedEventData() {}
/**
* Get the classificationPolicyId property: Router Job Classification Policy Id.
*
* @return the classificationPolicyId value.
*/
public String getClassificationPolicyId() {
return this.classificationPolicyId;
}
/**
* Set the classificationPolicyId property: Router Job Classification Policy Id.
*
* @param classificationPolicyId the classificationPolicyId value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
public AcsRouterJobClassificationFailedEventData setClassificationPolicyId(String classificationPolicyId) {
this.classificationPolicyId = classificationPolicyId;
return this;
}
/**
* Get the errors property: Router Job Classification Failed Errors.
*
* @return the errors value.
*/
public List<ResponseError> getErrors() {
    // Null-guard: 'errors' is null until populated, and the original dereferenced it
    // unconditionally (NPE). Preserve "not set" by returning null in that case.
    if (this.errors == null) {
        return null;
    }
    // Convert the wire-format AcsRouterCommunicationError entries into ResponseError.
    return this.errors.stream()
        .map(e -> new ResponseError(e.getCode(), e.getMessage()))
        .collect(Collectors.toList());
}
/**
* Set the errors property: Router Job Classification Failed Errors.
*
* @param errors the errors value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setQueueId(String queueId) {
super.setQueueId(queueId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setLabels(Map<String, String> labels) {
super.setLabels(labels);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setJobId(String jobId) {
super.setJobId(jobId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelReference(String channelReference) {
super.setChannelReference(channelReference);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelId(String channelId) {
super.setChannelId(channelId);
return this;
}
} |
Ah, sure. I thought this was `float`, my mistake. | public Duration getTimeToLive() {
return Duration.ofSeconds(ttlSeconds.longValue());
} | return Duration.ofSeconds(ttlSeconds.longValue()); | public Duration getTimeToLive() {
return ttlSeconds == null ? null : Duration.ofSeconds(ttlSeconds.longValue());
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
/**
* Set the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @param ttlSeconds the ttlSeconds value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setTimeToLive(Duration ttlSeconds) {
this.ttlSeconds = (float) ttlSeconds.getSeconds();
return this;
}
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
/**
* Set the timeToLive property: Router Job Worker Selector Time to Live in Seconds.
*
* @param timeToLive the timeToLive value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setTimeToLive(Duration timeToLive) {
if (timeToLive != null) {
this.ttlSeconds = (float) timeToLive.getSeconds();
}
return this;
}
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} |
this was discussed and there's code-gen blocks to doing this well, I gather. | public AcsRouterJobClassificationFailedEventData setErrors(List<ResponseError> errors) {
this.errors =
errors.stream()
.map(e -> new AcsRouterCommunicationError().setCode(e.getCode()).setMessage(e.getMessage()))
.collect(Collectors.toList());
return this;
} | return this; | public AcsRouterJobClassificationFailedEventData setErrors(List<ResponseError> errors) {
this.errors =
errors.stream()
.map(e -> new AcsRouterCommunicationError().setCode(e.getCode()).setMessage(e.getMessage()))
.collect(Collectors.toList());
return this;
} | class AcsRouterJobClassificationFailedEventData extends AcsRouterJobEventData {
/*
* Router Job Classification Policy Id
*/
@JsonProperty(value = "classificationPolicyId")
private String classificationPolicyId;
/*
* Router Job Classification Failed Errors
*/
@JsonProperty(value = "errors")
private List<AcsRouterCommunicationError> errors;
/** Creates an instance of AcsRouterJobClassificationFailedEventData class. */
public AcsRouterJobClassificationFailedEventData() {}
/**
* Get the classificationPolicyId property: Router Job Classification Policy Id.
*
* @return the classificationPolicyId value.
*/
public String getClassificationPolicyId() {
return this.classificationPolicyId;
}
/**
* Set the classificationPolicyId property: Router Job Classification Policy Id.
*
* @param classificationPolicyId the classificationPolicyId value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
public AcsRouterJobClassificationFailedEventData setClassificationPolicyId(String classificationPolicyId) {
this.classificationPolicyId = classificationPolicyId;
return this;
}
/**
* Get the errors property: Router Job Classification Failed Errors.
*
* @return the errors value.
*/
public List<ResponseError> getErrors() {
return this.errors.stream()
.map(e -> new ResponseError(e.getCode(), e.getMessage()))
.collect(Collectors.toList());
}
/**
* Set the errors property: Router Job Classification Failed Errors.
*
* @param errors the errors value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setQueueId(String queueId) {
super.setQueueId(queueId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setLabels(Map<String, String> labels) {
super.setLabels(labels);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setJobId(String jobId) {
super.setJobId(jobId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelReference(String channelReference) {
super.setChannelReference(channelReference);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelId(String channelId) {
super.setChannelId(channelId);
return this;
}
} | class AcsRouterJobClassificationFailedEventData extends AcsRouterJobEventData {
/*
* Router Job Classification Policy Id
*/
@JsonProperty(value = "classificationPolicyId")
private String classificationPolicyId;
/*
* Router Job Classification Failed Errors
*/
@JsonProperty(value = "errors")
private List<AcsRouterCommunicationError> errors;
/** Creates an instance of AcsRouterJobClassificationFailedEventData class. */
public AcsRouterJobClassificationFailedEventData() {}
/**
* Get the classificationPolicyId property: Router Job Classification Policy Id.
*
* @return the classificationPolicyId value.
*/
public String getClassificationPolicyId() {
return this.classificationPolicyId;
}
/**
* Set the classificationPolicyId property: Router Job Classification Policy Id.
*
* @param classificationPolicyId the classificationPolicyId value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
public AcsRouterJobClassificationFailedEventData setClassificationPolicyId(String classificationPolicyId) {
this.classificationPolicyId = classificationPolicyId;
return this;
}
/**
* Get the errors property: Router Job Classification Failed Errors.
*
* @return the errors value.
*/
public List<ResponseError> getErrors() {
return this.errors.stream()
.map(e -> new ResponseError(e.getCode(), e.getMessage()))
.collect(Collectors.toList());
}
/**
* Set the errors property: Router Job Classification Failed Errors.
*
* @param errors the errors value to set.
* @return the AcsRouterJobClassificationFailedEventData object itself.
*/
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setQueueId(String queueId) {
super.setQueueId(queueId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setLabels(Map<String, String> labels) {
super.setLabels(labels);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setJobId(String jobId) {
super.setJobId(jobId);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelReference(String channelReference) {
super.setChannelReference(channelReference);
return this;
}
/** {@inheritDoc} */
@Override
public AcsRouterJobClassificationFailedEventData setChannelId(String channelId) {
super.setChannelId(channelId);
return this;
}
} |
This should allow setting `null` value as this is not a required field. We just have to convert to float if not null. | public AcsRouterWorkerSelector setTimeToLive(Duration timeToLive) {
Objects.requireNonNull(timeToLive);
this.ttlSeconds = (float) timeToLive.getSeconds();
return this;
} | Objects.requireNonNull(timeToLive); | public AcsRouterWorkerSelector setTimeToLive(Duration timeToLive) {
if (timeToLive != null) {
this.ttlSeconds = (float) timeToLive.getSeconds();
}
return this;
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
public Duration getTimeToLive() {
return ttlSeconds == null ? null : Duration.ofSeconds(ttlSeconds.longValue());
}
/**
* Set the timeToLive property: Router Job Worker Selector Time to Live in Seconds.
*
* @param timeToLive the timeToLive value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} | class AcsRouterWorkerSelector {
/*
* Router Job Worker Selector Key
*/
@JsonProperty(value = "key")
private String key;
/*
* Router Job Worker Selector Label Operator
*/
@JsonProperty(value = "labelOperator")
private AcsRouterLabelOperator labelOperator;
/*
* Router Job Worker Selector Value
*/
@JsonProperty(value = "labelValue")
private Object labelValue;
/*
* Router Job Worker Selector Time to Live in Seconds
*/
@JsonProperty(value = "ttlSeconds")
private Float ttlSeconds;
/*
* Router Job Worker Selector State
*/
@JsonProperty(value = "state")
private AcsRouterWorkerSelectorState state;
/*
* Router Job Worker Selector Expiration Time
*/
@JsonProperty(value = "expirationTime")
private OffsetDateTime expirationTime;
/** Creates an instance of AcsRouterWorkerSelector class. */
public AcsRouterWorkerSelector() {}
/**
* Get the key property: Router Job Worker Selector Key.
*
* @return the key value.
*/
public String getKey() {
return this.key;
}
/**
* Set the key property: Router Job Worker Selector Key.
*
* @param key the key value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setKey(String key) {
this.key = key;
return this;
}
/**
* Get the labelOperator property: Router Job Worker Selector Label Operator.
*
* @return the labelOperator value.
*/
public AcsRouterLabelOperator getLabelOperator() {
return this.labelOperator;
}
/**
* Set the labelOperator property: Router Job Worker Selector Label Operator.
*
* @param labelOperator the labelOperator value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelOperator(AcsRouterLabelOperator labelOperator) {
this.labelOperator = labelOperator;
return this;
}
/**
* Get the labelValue property: Router Job Worker Selector Value.
*
* @return the labelValue value.
*/
public Object getLabelValue() {
return this.labelValue;
}
/**
* Set the labelValue property: Router Job Worker Selector Value.
*
* @param labelValue the labelValue value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setLabelValue(Object labelValue) {
this.labelValue = labelValue;
return this;
}
/**
* Get the ttlSeconds property: Router Job Worker Selector Time to Live in Seconds.
*
* @return the ttlSeconds value.
*/
public Duration getTimeToLive() {
return ttlSeconds == null ? null : Duration.ofSeconds(ttlSeconds.longValue());
}
/**
* Set the timeToLive property: Router Job Worker Selector Time to Live in Seconds.
*
* @param timeToLive the timeToLive value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
/**
* Get the state property: Router Job Worker Selector State.
*
* @return the state value.
*/
public AcsRouterWorkerSelectorState getState() {
return this.state;
}
/**
* Set the state property: Router Job Worker Selector State.
*
* @param state the state value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setState(AcsRouterWorkerSelectorState state) {
this.state = state;
return this;
}
/**
* Get the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @return the expirationTime value.
*/
public OffsetDateTime getExpirationTime() {
return this.expirationTime;
}
/**
* Set the expirationTime property: Router Job Worker Selector Expiration Time.
*
* @param expirationTime the expirationTime value to set.
* @return the AcsRouterWorkerSelector object itself.
*/
public AcsRouterWorkerSelector setExpirationTime(OffsetDateTime expirationTime) {
this.expirationTime = expirationTime;
return this;
}
} |
Could a body sanitizer be used to match on the boundary format (`--[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2}`) and sanitize it instead of using bodiless matching? Also, minor nit could we change the redaction value to `BOUNDARY` as we don't need to redact it but standardize it. | private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=REDACTED", TestProxySanitizerType.HEADER)
));
} | new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", | private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER)
));
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
}
protected String getAzureCognitiveSearchKey() {
String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
if (getTestMode() == TestMode.PLAYBACK) {
return FAKE_API_KEY;
} else if (azureCognitiveSearchKey != null) {
return azureCognitiveSearchKey;
} else {
throw new IllegalStateException(
"No Azure Cognitive Search API key found. "
+ "Please set the appropriate environment variable to use this value.");
}
}
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
private List<ChatMessage> getChatMessages() {
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
return chatMessages;
}
private ChatCompletionsOptions getChatMessagesWithFunction() {
FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
Parameters parameters = new Parameters();
functionDefinition.setParameters(parameters);
List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
chatCompletionOptions.setFunctions(functions);
return chatCompletionOptions;
}
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
/**
 * Asserts a single streamed chat completions chunk. Chunks without an id are
 * skipped entirely (NOTE(review): presumably empty/heartbeat frames —
 * confirm against service behavior).
 */
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
    // Read the id once instead of calling the getter three times; the
    // original also re-asserted non-null inside the guard, which was redundant.
    String id = chatCompletions.getId();
    if (id == null || id.isEmpty()) {
        return;
    }
    assertNotNull(chatCompletions.getChoices());
    assertFalse(chatCompletions.getChoices().isEmpty());
    assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
/**
 * Asserts a single streamed completions chunk. Chunks without an id are
 * skipped entirely (NOTE(review): presumably empty/heartbeat frames —
 * confirm against service behavior).
 */
static void assertCompletionsStream(Completions completions) {
    // Read the id once; the original re-asserted non-null inside the guard,
    // which was redundant.
    String id = completions.getId();
    if (id == null || id.isEmpty()) {
        return;
    }
    assertNotNull(completions.getChoices());
    assertFalse(completions.getChoices().isEmpty());
    assertNotNull(completions.getChoices().get(0).getText());
}
/**
 * Asserts a chat completions result with explicit finish-reason and role
 * expectations applied to every choice, plus token usage present.
 */
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
    List<ChatChoice> choices = actual.getChoices();
    assertNotNull(choices);
    assertTrue(choices.size() > 0);
    assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
    assertNotNull(actual.getUsage());
}
/**
 * Asserts the expected number of chat choices and validates each one in
 * index order.
 */
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
    assertEquals(choiceCount, actual.size());
    int index = 0;
    for (ChatChoice chatChoice : actual) {
        assertChatChoice(index, expectedFinishReason, chatRole, chatChoice);
        index++;
    }
}
/**
 * Asserts a single chat choice: expected index, expected message role,
 * non-null message content, and the expected finish reason (compared via
 * its string form).
 */
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
    assertEquals(index, actual.getIndex());
    assertEquals(chatRole, actual.getMessage().getRole());
    assertNotNull(actual.getMessage().getContent());
    assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
/**
 * Asserts an embeddings result: at least one embedding item, each with a
 * non-empty vector, and token usage present.
 */
static void assertEmbeddings(Embeddings actual) {
    List<EmbeddingItem> data = actual.getData();
    assertNotNull(data);
    assertFalse(data.isEmpty());
    for (EmbeddingItem embeddingItem : data) {
        List<Double> vector = embeddingItem.getEmbedding();
        assertNotNull(vector);
        assertFalse(vector.isEmpty());
    }
    assertNotNull(actual.getUsage());
}
/**
 * Asserts an image generation response contains at least one image entry.
 */
static void assertImageResponse(ImageResponse actual) {
    assertNotNull(actual.getData());
    assertFalse(actual.getData().isEmpty());
}
/**
 * Asserts a function-calling chat choice (index 0, finish reason
 * "function_call", expected function name) and deserializes the function
 * call arguments into the given type.
 *
 * @return the deserialized function-call arguments
 */
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
    assertEquals(0, actual.getIndex());
    assertEquals("function_call", actual.getFinishReason().toString());
    FunctionCall call = actual.getMessage().getFunctionCall();
    assertEquals(functionName, call.getName());
    BinaryData arguments = BinaryData.fromString(call.getArguments());
    return arguments.toObject(myPropertiesClazz);
}
/**
 * Asserts that every content-filter category (hate, sexual, self-harm,
 * violence) is present, not filtered, and rated SAFE.
 *
 * <p>Uses JUnit's expected-first argument order (the original passed the
 * actual value first, which makes failure messages misleading).
 */
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertFalse(contentFilterResults.getHate().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getHate().getSeverity());
    assertFalse(contentFilterResults.getSexual().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSexual().getSeverity());
    assertFalse(contentFilterResults.getSelfHarm().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSelfHarm().getSeverity());
    assertFalse(contentFilterResults.getViolence().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getViolence().getSeverity());
}
/**
 * Asserts that none of the content-filter categories are present on the
 * result (all category getters return null).
 */
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertNull(contentFilterResults.getHate());
    assertNull(contentFilterResults.getSexual());
    assertNull(contentFilterResults.getViolence());
    assertNull(contentFilterResults.getSelfHarm());
}
/**
 * Asserts a non-streaming "bring your own data" (Azure Cognitive Search)
 * chat completion: a single assistant choice that stopped normally, plus an
 * extensions message context whose first message is a TOOL message whose
 * content contains the "citations" payload.
 */
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
    List<ChatChoice> choices = chatCompletions.getChoices();
    assertNotNull(choices);
    assertTrue(choices.size() > 0);
    assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
    AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
    assertNotNull(messageContext);
    assertNotNull(messageContext.getMessages());
    ChatMessage firstMessage = messageContext.getMessages().get(0);
    assertNotNull(firstMessage);
    assertEquals(ChatRole.TOOL, firstMessage.getRole());
    assertFalse(firstMessage.getContent().isEmpty());
    // The tool message content embeds the retrieved documents as citations.
    assertTrue(firstMessage.getContent().contains("citations"));
}
/**
 * Asserts the chunk sequence of a streamed "bring your own data" chat
 * completion: the first chunk carries the extensions context with a TOOL
 * citations message, the second carries the assistant role, the final chunk
 * carries the STOPPED finish reason, and every intermediate chunk carries
 * delta content.
 */
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
    List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
    // size() replaces the original toArray().length, avoiding a needless array copy.
    assertTrue(chatCompletions.size() > 1);
    for (int i = 0; i < chatCompletions.size(); i++) {
        ChatCompletions chatCompletion = chatCompletions.get(i);
        List<ChatChoice> choices = chatCompletion.getChoices();
        assertNotNull(choices);
        assertTrue(choices.size() > 0);
        if (i == 0) {
            // First chunk: extensions context with the TOOL citations message.
            AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
            assertNotNull(messageContext);
            assertNotNull(messageContext.getMessages());
            ChatMessage firstMessage = messageContext.getMessages().get(0);
            assertNotNull(firstMessage);
            assertEquals(ChatRole.TOOL, firstMessage.getRole());
            assertFalse(firstMessage.getContent().isEmpty());
            assertTrue(firstMessage.getContent().contains("citations"));
        } else if (i == 1) {
            // Second chunk: role only, no context.
            assertNull(choices.get(0).getDelta().getContext());
            // Expected-first argument order (the original passed actual first).
            assertEquals(ChatRole.ASSISTANT, choices.get(0).getDelta().getRole());
        } else if (i == chatCompletions.size() - 1) {
            // Final chunk: finish reason.
            assertEquals(CompletionsFinishReason.STOPPED, choices.get(0).getFinishReason());
        } else {
            // Intermediate chunks: incremental content.
            assertNotNull(choices.get(0).getDelta().getContent());
        }
    }
}
/**
 * Asserts a transcription produced with the simple JSON response format:
 * only the text is populated.
 */
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
    assertNotNull(transcription);
    assertEquals(expectedText, transcription.getText());
    // Duration, language, task and segments are only populated by the
    // verbose JSON format (see assertAudioTranscriptionVerboseJson).
    assertNull(transcription.getDuration());
    assertNull(transcription.getLanguage());
    assertNull(transcription.getTask());
    assertNull(transcription.getSegments());
}
/**
 * Asserts a transcription produced with the verbose JSON response format:
 * text plus duration, language, task label, and a non-empty segment list.
 */
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
    assertNotNull(transcription);
    assertEquals(expectedText, transcription.getText());
    assertNotNull(transcription.getDuration());
    assertNotNull(transcription.getLanguage());
    assertEquals(audioTaskLabel, transcription.getTask());
    assertNotNull(transcription.getSegments());
    assertFalse(transcription.getSegments().isEmpty());
}
/**
 * Asserts a translation produced with the simple JSON response format:
 * only the text is populated.
 */
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
    assertNotNull(translation);
    assertEquals(expectedText, translation.getText());
    // Verbose-only fields must be absent in the simple format.
    assertNull(translation.getDuration());
    assertNull(translation.getLanguage());
    assertNull(translation.getTask());
    assertNull(translation.getSegments());
}
/**
 * Asserts a translation produced with the verbose JSON response format:
 * text plus duration, language, task label, and a non-empty segment list.
 */
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
    assertNotNull(translation);
    assertEquals(expectedText, translation.getText());
    assertNotNull(translation.getDuration());
    assertNotNull(translation.getLanguage());
    assertEquals(audioTaskLabel, translation.getTask());
    assertNotNull(translation.getSegments());
    assertFalse(translation.getSegments().isEmpty());
}
// Expected transcription text for the batman.wav test asset; compared
// against audio transcription results in the tests above.
protected static final String BATMAN_TRANSCRIPTION =
    "Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
        + "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
        + "regarded as one of the world's greatest detectives, if not the world's greatest "
        + "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
        + " the greatest martial artists in the DC universe, and having peak human physical "
        + "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
        + "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
        + "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
        + "him to access advanced technology. As a proficient scientist, he is able to use and modify "
        + "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
        + "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
        + "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
        + "to be one of the most brilliant minds on the planet. Batman has the ability to function "
        + "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
        + "and an expert in espionage, often gathering information under different identities. "
        + "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
        + "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} | class OpenAIClientTestBase extends TestProxyTestBase {
/**
 * Builds an {@link OpenAIClientBuilder} for the Azure OpenAI service, wired
 * for the current test mode (PLAYBACK, RECORD, or LIVE).
 *
 * @param httpClient HTTP client to use for the tests
 * @param serviceVersion service version under test
 * @return a configured builder
 */
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
    OpenAIClientBuilder builder = new OpenAIClientBuilder()
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .httpClient(httpClient)
        .serviceVersion(serviceVersion);
    // Recorded data is sanitized in PLAYBACK and RECORD modes only.
    if (getTestMode() != TestMode.LIVE) {
        addTestRecordCustomSanitizers();
    }
    if (getTestMode() == TestMode.PLAYBACK) {
        builder
            // NOTE(review): the endpoint URL literal appears truncated here
            // ("https:) — confirm the full placeholder URL in the original source.
            .endpoint("https:
            .credential(new AzureKeyCredential(FAKE_API_KEY));
    } else if (getTestMode() == TestMode.RECORD) {
        builder
            .addPolicy(interceptorManager.getRecordPolicy())
            .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
            .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
    } else {
        // LIVE mode: hit the real endpoint, no record policy.
        builder
            .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
            .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
    }
    return builder;
}
/**
 * Builds an {@link OpenAIClientBuilder} for the non-Azure OpenAI service,
 * wired for the current test mode (PLAYBACK, RECORD, or LIVE).
 *
 * @param httpClient HTTP client to use for the tests
 * @return a configured builder
 */
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
    OpenAIClientBuilder builder = new OpenAIClientBuilder()
        .httpClient(httpClient);
    // Recorded data is sanitized in PLAYBACK and RECORD modes only.
    if (getTestMode() != TestMode.LIVE) {
        addTestRecordCustomSanitizers();
    }
    if (getTestMode() == TestMode.PLAYBACK) {
        builder
            .credential(new KeyCredential(FAKE_API_KEY));
    } else if (getTestMode() == TestMode.RECORD) {
        builder
            .addPolicy(interceptorManager.getRecordPolicy())
            .credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
    } else {
        // LIVE mode: real key, no record policy.
        builder
            .credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
    }
    return builder;
}
/**
 * Returns the Azure Cognitive Search API key for the current test mode:
 * a fake key in playback, otherwise the value of the ACS_BYOD_API_KEY
 * environment variable.
 *
 * @throws IllegalStateException when no key is configured outside playback
 */
protected String getAzureCognitiveSearchKey() {
    String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
    if (getTestMode() == TestMode.PLAYBACK) {
        return FAKE_API_KEY;
    }
    if (azureCognitiveSearchKey == null) {
        throw new IllegalStateException(
            "No Azure Cognitive Search API key found. "
                + "Please set the appropriate environment variable to use this value.");
    }
    return azureCognitiveSearchKey;
}
// Core test cases; implemented by concrete subclasses of this test base.
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
/**
 * Supplies the deployment id and prompt list used by completions tests.
 */
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
    List<String> prompts = new ArrayList<>();
    prompts.add("Say this is a test");
    testRunner.accept("text-davinci-003", prompts);
}
/**
 * Supplies the deployment id and a single prompt string for completions tests.
 */
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
    testRunner.accept("text-davinci-003", "Say this is a test");
}
// Test-data runners below pair a model/deployment name with the request
// payload for one scenario. "NonAzure" variants target the public OpenAI
// service model names; the others target Azure deployment names.

// Chat completions against the Azure "gpt-35-turbo" deployment.
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
    testRunner.accept("gpt-35-turbo", getChatMessages());
}
// Chat completions against the public OpenAI "gpt-3.5-turbo" model.
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
    testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
// "Bring your own data" (Azure Cognitive Search) chat scenario.
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
    ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
        Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
    testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
// Embeddings scenarios (Azure and non-Azure use the same model name here).
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
    testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
    testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
// Image generation scenario.
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
    testRunner.accept(
        new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
    );
}
// Function-calling scenarios.
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
    testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
    testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
// Content-filter scenarios.
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
    testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
    testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
    testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
    testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
// Audio transcription/translation scenarios, keyed by audio test asset.
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
    testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
    testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
    testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
    testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
/**
 * Builds the shared four-message pirate-assistant conversation used by the
 * chat completion tests.
 */
private List<ChatMessage> getChatMessages() {
    List<ChatMessage> chatMessages = new ArrayList<>();
    chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
    chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
    chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
    chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
    return chatMessages;
}
/**
 * Builds chat completion options carrying one user message and a single
 * function definition ("MyFunction"), for function-calling tests.
 */
private ChatCompletionsOptions getChatMessagesWithFunction() {
    FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
    Parameters parameters = new Parameters();
    functionDefinition.setParameters(parameters);
    List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
    List<ChatMessage> chatMessages = new ArrayList<>();
    chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
    ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
    chatCompletionOptions.setFunctions(functions);
    return chatCompletionOptions;
}
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertFalse(contentFilterResults.getHate().isFiltered());
assertEquals(contentFilterResults.getHate().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSexual().isFiltered());
assertEquals(contentFilterResults.getSexual().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSelfHarm().isFiltered());
assertEquals(contentFilterResults.getSelfHarm().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getViolence().isFiltered());
assertEquals(contentFilterResults.getViolence().getSeverity(), ContentFilterSeverity.SAFE);
}
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
assertTrue(chatCompletions.toArray().length > 1);
for (int i = 0; i < chatCompletions.size(); i++) {
ChatCompletions chatCompletion = chatCompletions.get(i);
List<ChatChoice> choices = chatCompletion.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
if (i == 0) {
AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
} else if (i == 1) {
assertNull(choices.get(0).getDelta().getContext());
assertEquals(choices.get(0).getDelta().getRole(), ChatRole.ASSISTANT);
} else if (i == chatCompletions.size() - 1) {
assertEquals(choices.get(0).getFinishReason(), CompletionsFinishReason.STOPPED);
} else {
assertNotNull(choices.get(0).getDelta().getContent());
}
}
}
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} |
It doesn't work in the request body somehow, I tried with exact same regexp ` new TestProxySanitizer( "(--[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", "REDACT". TestProxySanitizerType.BODY_REGEX)` | private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=REDACTED", TestProxySanitizerType.HEADER)
));
} | new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", | private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER)
));
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
}
protected String getAzureCognitiveSearchKey() {
String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
if (getTestMode() == TestMode.PLAYBACK) {
return FAKE_API_KEY;
} else if (azureCognitiveSearchKey != null) {
return azureCognitiveSearchKey;
} else {
throw new IllegalStateException(
"No Azure Cognitive Search API key found. "
+ "Please set the appropriate environment variable to use this value.");
}
}
// Abstract test cases below are implemented by the sync/async client subclasses and are
// parameterized with an HttpClient and an OpenAIServiceVersion by the test framework.
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
// Runner helpers: each hands a deployment/model name plus a request payload to the supplied
// test body. "ForNonAzure" variants pass public OpenAI model names (e.g. "gpt-3.5-turbo");
// the other variants pass Azure deployment names (e.g. "gpt-35-turbo").
// Completions with a single-element prompt list.
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
// Completions with a bare prompt string (not wrapped in a list).
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
// Chat completions against the Azure deployment.
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
// Chat completions against the public OpenAI model.
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
// Chat completions options for the Azure Cognitive Search ("bring your own data") scenario.
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
// Embeddings request; the model/deployment name is identical for both variants.
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
// Image generation request with a fixed prompt.
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
// Function-calling chat payloads (see getChatMessagesWithFunction).
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
// Content-filter scenarios: chat and plain completions, Azure and non-Azure variants.
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
// Audio scenarios: Whisper transcription/translation, Azure deployment vs. "whisper-1" model.
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
// Builds the fixed pirate-assistant conversation shared by the chat completion runners:
// a system prompt, two user turns, and one assistant turn, in order. Returns a mutable list.
private List<ChatMessage> getChatMessages() {
    ChatMessage systemPrompt =
        new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate.");
    ChatMessage firstUserTurn = new ChatMessage(ChatRole.USER, "Can you help me?");
    ChatMessage assistantTurn =
        new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?");
    ChatMessage secondUserTurn =
        new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?");
    return new ArrayList<>(Arrays.asList(systemPrompt, firstUserTurn, assistantTurn, secondUserTurn));
}
// Builds a ChatCompletionsOptions payload containing a single user question plus one function
// definition ("MyFunction") so the function-calling code path can be exercised.
private ChatCompletionsOptions getChatMessagesWithFunction() {
    List<ChatMessage> chatMessages = new ArrayList<>();
    chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
    ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
    FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
    functionDefinition.setParameters(new Parameters());
    chatCompletionOptions.setFunctions(Arrays.asList(functionDefinition));
    return chatCompletionOptions;
}
// Resolves a test fixture file name to its path under the standard test resources directory.
static Path openTestResourceFile(String fileName) {
    String resourcePath = "src/test/resources/" + fileName;
    return Paths.get(resourcePath);
}
// Asserts a Completions payload with the default "stop" finish reason.
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
// Asserts a Completions payload: non-null, expected choice count/finish reason, and usage data.
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
// Asserts the HTTP status code of a BinaryData response and deserializes its body into clazz.
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
// Asserts the choice count and validates each choice; indices must match list positions.
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
// Asserts one completion choice: non-null text, expected position, and finish reason.
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// Asserts a ChatCompletions payload with defaults: "stop" finish reason and ASSISTANT role.
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
// Asserts a single streamed chat update; updates without an id are skipped entirely
// (presumably empty/initial stream frames — TODO confirm against the streaming contract).
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
// Asserts a single streamed completions update; same id-guard as assertChatCompletionsStream.
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
// Asserts a ChatCompletions payload with explicit finish-reason and role expectations.
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
// Asserts the chat choice count and validates each choice in index order.
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
// Asserts one chat choice: position, role, non-null content, and finish reason.
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// Asserts an Embeddings payload: at least one item, each with a non-empty vector, plus usage.
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
// Asserts an image generation response contains at least one image entry.
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
// Asserts a function-call chat choice (index 0, finish reason "function_call", expected
// function name) and deserializes the call's JSON arguments into myPropertiesClazz.
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
/**
 * Asserts that every content filter category (hate, sexual, self-harm, violence) is present,
 * not filtered, and rated {@code ContentFilterSeverity.SAFE}.
 *
 * @param contentFilterResults the content filter annotations returned by the service.
 */
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertFalse(contentFilterResults.getHate().isFiltered());
    // JUnit convention: expected value first, actual second — otherwise failure messages
    // report the values swapped.
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getHate().getSeverity());
    assertFalse(contentFilterResults.getSexual().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSexual().getSeverity());
    assertFalse(contentFilterResults.getSelfHarm().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSelfHarm().getSeverity());
    assertFalse(contentFilterResults.getViolence().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getViolence().getSeverity());
}
// Asserts that no content filter annotations are present (all four categories are null).
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
// Asserts a non-streaming Azure Cognitive Search ("bring your own data") chat response:
// one STOPPED assistant choice whose extension context carries a TOOL message containing
// a "citations" payload.
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
/**
 * Asserts the streamed chat updates of an Azure Cognitive Search ("bring your own data")
 * request: the first update carries the extension context with a citations TOOL message,
 * the second carries the assistant role, the last carries the STOPPED finish reason, and
 * every update in between carries incremental content.
 *
 * @param chatCompletionsStream the stream of chat completion updates to validate.
 */
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
    List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
    // Read size() directly instead of materializing an extra array just for its length.
    assertTrue(chatCompletions.size() > 1);
    for (int i = 0; i < chatCompletions.size(); i++) {
        ChatCompletions chatCompletion = chatCompletions.get(i);
        List<ChatChoice> choices = chatCompletion.getChoices();
        assertNotNull(choices);
        assertFalse(choices.isEmpty());
        if (i == 0) {
            // First update: extension context with the retrieved-documents TOOL message.
            AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
            assertNotNull(messageContext);
            assertNotNull(messageContext.getMessages());
            ChatMessage firstMessage = messageContext.getMessages().get(0);
            assertNotNull(firstMessage);
            assertEquals(ChatRole.TOOL, firstMessage.getRole());
            assertFalse(firstMessage.getContent().isEmpty());
            assertTrue(firstMessage.getContent().contains("citations"));
        } else if (i == 1) {
            assertNull(choices.get(0).getDelta().getContext());
            // JUnit convention: expected value first, actual second.
            assertEquals(ChatRole.ASSISTANT, choices.get(0).getDelta().getRole());
        } else if (i == chatCompletions.size() - 1) {
            assertEquals(CompletionsFinishReason.STOPPED, choices.get(0).getFinishReason());
        } else {
            assertNotNull(choices.get(0).getDelta().getContent());
        }
    }
}
// Asserts a transcription returned in "simple JSON" response format: only the text field
// is populated; all verbose metadata must be absent.
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
// Asserts a transcription returned in "verbose JSON" response format: duration, language,
// the expected task label, and non-empty segments are all populated.
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
// Asserts a translation in "simple JSON" response format (text only, no metadata).
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
// Asserts a translation in "verbose JSON" response format (all metadata populated).
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
// Expected transcription text for the batman.wav audio fixture used by the Whisper tests.
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} | class OpenAIClientTestBase extends TestProxyTestBase {
// Builds an OpenAIClientBuilder wired for the Azure OpenAI endpoint, configured per test mode:
// PLAYBACK uses a fake credential against the local test proxy, RECORD adds the record policy
// and real credentials, LIVE talks to the real endpoint from environment configuration.
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
// Sanitizers keep recorded sessions free of secrets.
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
// NOTE(review): this string literal is truncated ("https: is unterminated) — the playback
// endpoint URL appears to have been cut off; restore the full value. TODO confirm original.
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
// Builds an OpenAIClientBuilder for the public (non-Azure) OpenAI service, configured per test
// mode: PLAYBACK uses a fake key, RECORD adds the record policy, otherwise the key comes from
// the NON_AZURE_OPENAI_KEY environment configuration. No endpoint is set; the builder's
// default non-Azure endpoint applies.
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
// Sanitizers keep recorded sessions free of secrets.
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
}
// Returns the Azure Cognitive Search API key: the fake key in playback mode, otherwise the
// ACS_BYOD_API_KEY configuration value; fails fast when no key is configured.
protected String getAzureCognitiveSearchKey() {
    String configuredKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
    if (getTestMode() == TestMode.PLAYBACK) {
        return FAKE_API_KEY;
    }
    if (configuredKey == null) {
        throw new IllegalStateException(
            "No Azure Cognitive Search API key found. "
                + "Please set the appropriate environment variable to use this value.");
    }
    return configuredKey;
}
// NOTE(review): everything in this region is a byte-for-byte duplicate of the abstract test
// declarations and helper methods that appear earlier in this file — likely an extraction or
// merge artifact. Confirm and deduplicate; in valid Java these redefinitions would not compile.
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
// Runner helpers: each hands a deployment/model name plus a request payload to the supplied
// test body ("ForNonAzure" variants use public OpenAI model names).
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
// Builds the fixed pirate-assistant conversation used by the chat completion runners.
private List<ChatMessage> getChatMessages() {
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
return chatMessages;
}
// Builds a single-question chat payload carrying one "MyFunction" function definition.
private ChatCompletionsOptions getChatMessagesWithFunction() {
FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
Parameters parameters = new Parameters();
functionDefinition.setParameters(parameters);
List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
chatCompletionOptions.setFunctions(functions);
return chatCompletionOptions;
}
// Resolves a test fixture file name to its path under src/test/resources.
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
// NOTE(review): the assertion helpers below duplicate the ones defined earlier in this file
// byte-for-byte — likely an extraction/merge artifact; confirm and deduplicate.
// Asserts a Completions payload with the default "stop" finish reason.
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
// Asserts a Completions payload: non-null, expected choice count/finish reason, usage data.
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
// Asserts the HTTP status code and deserializes the BinaryData body into clazz.
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
// Asserts the choice count and validates each choice in index order.
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// Chat completion assertions with default "stop"/ASSISTANT expectations.
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
// Streamed-update assertions; updates without an id are skipped entirely.
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
// Chat completion assertions with explicit finish-reason and role expectations.
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// Embeddings: at least one item, each with a non-empty vector, plus usage data.
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
// Image generation: at least one image entry.
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
// Function-call assertions; deserializes the call's JSON arguments into myPropertiesClazz.
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
/**
 * Asserts that every content filter category (hate, sexual, self-harm, violence) is present,
 * not filtered, and rated {@code ContentFilterSeverity.SAFE}.
 *
 * @param contentFilterResults the content filter annotations returned by the service.
 */
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertFalse(contentFilterResults.getHate().isFiltered());
    // JUnit convention: expected value first, actual second — otherwise failure messages
    // report the values swapped.
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getHate().getSeverity());
    assertFalse(contentFilterResults.getSexual().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSexual().getSeverity());
    assertFalse(contentFilterResults.getSelfHarm().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSelfHarm().getSeverity());
    assertFalse(contentFilterResults.getViolence().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getViolence().getSeverity());
}
// Asserts that no content filter annotations are present (all four categories are null).
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
// Asserts a non-streaming Azure Cognitive Search ("bring your own data") chat response:
// one STOPPED assistant choice whose extension context carries a TOOL message containing
// a "citations" payload.
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
/**
 * Asserts the streamed chat updates of an Azure Cognitive Search ("bring your own data")
 * request: the first update carries the extension context with a citations TOOL message,
 * the second carries the assistant role, the last carries the STOPPED finish reason, and
 * every update in between carries incremental content.
 *
 * @param chatCompletionsStream the stream of chat completion updates to validate.
 */
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
    List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
    // Read size() directly instead of materializing an extra array just for its length.
    assertTrue(chatCompletions.size() > 1);
    for (int i = 0; i < chatCompletions.size(); i++) {
        ChatCompletions chatCompletion = chatCompletions.get(i);
        List<ChatChoice> choices = chatCompletion.getChoices();
        assertNotNull(choices);
        assertFalse(choices.isEmpty());
        if (i == 0) {
            // First update: extension context with the retrieved-documents TOOL message.
            AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
            assertNotNull(messageContext);
            assertNotNull(messageContext.getMessages());
            ChatMessage firstMessage = messageContext.getMessages().get(0);
            assertNotNull(firstMessage);
            assertEquals(ChatRole.TOOL, firstMessage.getRole());
            assertFalse(firstMessage.getContent().isEmpty());
            assertTrue(firstMessage.getContent().contains("citations"));
        } else if (i == 1) {
            assertNull(choices.get(0).getDelta().getContext());
            // JUnit convention: expected value first, actual second.
            assertEquals(ChatRole.ASSISTANT, choices.get(0).getDelta().getRole());
        } else if (i == chatCompletions.size() - 1) {
            assertEquals(CompletionsFinishReason.STOPPED, choices.get(0).getFinishReason());
        } else {
            assertNotNull(choices.get(0).getDelta().getContent());
        }
    }
}
// Asserts a transcription in "simple JSON" response format: text only, no verbose metadata.
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
// Asserts a transcription in "verbose JSON" response format: duration, language, the
// expected task label, and non-empty segments are all populated.
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
// Asserts a translation in "simple JSON" response format (text only, no metadata).
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
// Asserts a translation in "verbose JSON" response format (all metadata populated).
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
// Expected transcription text for the batman.wav audio fixture used by the Whisper tests.
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} |
```suggestion matchers.add(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS); ``` | OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
} | matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS)); | OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=REDACTED", TestProxySanitizerType.HEADER)
));
}
protected String getAzureCognitiveSearchKey() {
String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
if (getTestMode() == TestMode.PLAYBACK) {
return FAKE_API_KEY;
} else if (azureCognitiveSearchKey != null) {
return azureCognitiveSearchKey;
} else {
throw new IllegalStateException(
"No Azure Cognitive Search API key found. "
+ "Please set the appropriate environment variable to use this value.");
}
}
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
private List<ChatMessage> getChatMessages() {
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
return chatMessages;
}
private ChatCompletionsOptions getChatMessagesWithFunction() {
FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
Parameters parameters = new Parameters();
functionDefinition.setParameters(parameters);
List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
chatCompletionOptions.setFunctions(functions);
return chatCompletionOptions;
}
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertFalse(contentFilterResults.getHate().isFiltered());
assertEquals(contentFilterResults.getHate().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSexual().isFiltered());
assertEquals(contentFilterResults.getSexual().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSelfHarm().isFiltered());
assertEquals(contentFilterResults.getSelfHarm().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getViolence().isFiltered());
assertEquals(contentFilterResults.getViolence().getSeverity(), ContentFilterSeverity.SAFE);
}
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
assertTrue(chatCompletions.toArray().length > 1);
for (int i = 0; i < chatCompletions.size(); i++) {
ChatCompletions chatCompletion = chatCompletions.get(i);
List<ChatChoice> choices = chatCompletion.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
if (i == 0) {
AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
} else if (i == 1) {
assertNull(choices.get(0).getDelta().getContext());
assertEquals(choices.get(0).getDelta().getRole(), ChatRole.ASSISTANT);
} else if (i == chatCompletions.size() - 1) {
assertEquals(choices.get(0).getFinishReason(), CompletionsFinishReason.STOPPED);
} else {
assertNotNull(choices.get(0).getDelta().getContent());
}
}
}
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER)
));
}
protected String getAzureCognitiveSearchKey() {
String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
if (getTestMode() == TestMode.PLAYBACK) {
return FAKE_API_KEY;
} else if (azureCognitiveSearchKey != null) {
return azureCognitiveSearchKey;
} else {
throw new IllegalStateException(
"No Azure Cognitive Search API key found. "
+ "Please set the appropriate environment variable to use this value.");
}
}
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
private List<ChatMessage> getChatMessages() {
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
return chatMessages;
}
private ChatCompletionsOptions getChatMessagesWithFunction() {
FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
Parameters parameters = new Parameters();
functionDefinition.setParameters(parameters);
List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
chatCompletionOptions.setFunctions(functions);
return chatCompletionOptions;
}
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
/**
 * Asserts that every content-filter category (hate, sexual, self-harm, violence)
 * reports "not filtered" with {@code SAFE} severity.
 *
 * <p>Fix: {@code assertEquals(expected, actual)} takes the expected value first.
 * The original calls passed the actual value first, which yields misleading
 * "expected X but was SAFE" failure messages.</p>
 *
 * @param contentFilterResults the content filter results under test
 */
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertFalse(contentFilterResults.getHate().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getHate().getSeverity());
    assertFalse(contentFilterResults.getSexual().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSexual().getSeverity());
    assertFalse(contentFilterResults.getSelfHarm().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSelfHarm().getSeverity());
    assertFalse(contentFilterResults.getViolence().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getViolence().getSeverity());
}
/**
 * Asserts that no content-filter categories are present on the results at all —
 * every category getter returns null — as opposed to present-but-safe
 * (see {@code assertSafeContentFilterResults}).
 *
 * @param contentFilterResults the content filter results under test
 */
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    assertNull(contentFilterResults.getHate());
    assertNull(contentFilterResults.getSexual());
    assertNull(contentFilterResults.getViolence());
    assertNull(contentFilterResults.getSelfHarm());
}
/**
 * Asserts a "bring your own data" (Cognitive Search) chat response: one stopped
 * assistant choice whose message context carries a TOOL message containing
 * {@code "citations"}.
 *
 * @param chatCompletions the chat completions payload under test
 */
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
    List<ChatChoice> choices = chatCompletions.getChoices();
    assertNotNull(choices);
    assertFalse(choices.isEmpty());
    assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
    AzureChatExtensionsMessageContext extensionContext = choices.get(0).getMessage().getContext();
    assertNotNull(extensionContext);
    assertNotNull(extensionContext.getMessages());
    ChatMessage toolMessage = extensionContext.getMessages().get(0);
    assertNotNull(toolMessage);
    assertEquals(ChatRole.TOOL, toolMessage.getRole());
    String toolContent = toolMessage.getContent();
    assertFalse(toolContent.isEmpty());
    assertTrue(toolContent.contains("citations"));
}
/**
 * Asserts a streaming "bring your own data" (Cognitive Search) chat response:
 * the first chunk carries the TOOL context with citations, the second announces
 * the ASSISTANT role, the last reports the STOPPED finish reason, and every
 * chunk in between carries delta content.
 *
 * <p>Fixes: (1) {@code toArray().length} copied the whole list just to read its
 * size — use {@code size()}; (2) {@code assertEquals} expected/actual argument
 * order was swapped, which corrupts failure messages.</p>
 *
 * @param chatCompletionsStream the stream of completion chunks under test
 */
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
    List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
    // A streamed response must arrive in more than one chunk.
    assertTrue(chatCompletions.size() > 1);
    for (int i = 0; i < chatCompletions.size(); i++) {
        List<ChatChoice> choices = chatCompletions.get(i).getChoices();
        assertNotNull(choices);
        assertTrue(choices.size() > 0);
        ChatChoice firstChoice = choices.get(0);
        if (i == 0) {
            AzureChatExtensionsMessageContext messageContext = firstChoice.getDelta().getContext();
            assertNotNull(messageContext);
            assertNotNull(messageContext.getMessages());
            ChatMessage firstMessage = messageContext.getMessages().get(0);
            assertNotNull(firstMessage);
            assertEquals(ChatRole.TOOL, firstMessage.getRole());
            assertFalse(firstMessage.getContent().isEmpty());
            assertTrue(firstMessage.getContent().contains("citations"));
        } else if (i == 1) {
            assertNull(firstChoice.getDelta().getContext());
            assertEquals(ChatRole.ASSISTANT, firstChoice.getDelta().getRole());
        } else if (i == chatCompletions.size() - 1) {
            assertEquals(CompletionsFinishReason.STOPPED, firstChoice.getFinishReason());
        } else {
            assertNotNull(firstChoice.getDelta().getContent());
        }
    }
}
/**
 * Asserts the shape of a transcription returned in the simple (text-only)
 * response format: the text matches and all verbose-only fields are absent.
 *
 * @param transcription the transcription under test
 * @param expectedText expected transcribed text
 */
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
    assertNotNull(transcription);
    assertEquals(expectedText, transcription.getText());
    // Simple JSON responses carry no verbose metadata.
    assertNull(transcription.getDuration());
    assertNull(transcription.getLanguage());
    assertNull(transcription.getTask());
    assertNull(transcription.getSegments());
}
/**
 * Asserts the shape of a transcription returned in the verbose JSON response
 * format: text plus duration, language, the expected task label, and a
 * non-empty segment list.
 *
 * @param transcription the transcription under test
 * @param expectedText expected transcribed text
 * @param audioTaskLabel expected task label
 */
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
    assertNotNull(transcription);
    assertEquals(expectedText, transcription.getText());
    assertNotNull(transcription.getDuration());
    assertNotNull(transcription.getLanguage());
    assertEquals(audioTaskLabel, transcription.getTask());
    assertNotNull(transcription.getSegments());
    assertFalse(transcription.getSegments().isEmpty());
}
/**
 * Asserts the shape of a translation returned in the simple (text-only)
 * response format: the text matches and all verbose-only fields are absent.
 *
 * @param translation the translation under test
 * @param expectedText expected translated text
 */
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
    assertNotNull(translation);
    assertEquals(expectedText, translation.getText());
    // Simple JSON responses carry no verbose metadata.
    assertNull(translation.getDuration());
    assertNull(translation.getLanguage());
    assertNull(translation.getTask());
    assertNull(translation.getSegments());
}
/**
 * Asserts the shape of a translation returned in the verbose JSON response
 * format: text plus duration, language, the expected task label, and a
 * non-empty segment list.
 *
 * @param translation the translation under test
 * @param expectedText expected translated text
 * @param audioTaskLabel expected task label
 */
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
    assertNotNull(translation);
    assertEquals(expectedText, translation.getText());
    assertNotNull(translation.getDuration());
    assertNotNull(translation.getLanguage());
    assertEquals(audioTaskLabel, translation.getTask());
    assertNotNull(translation.getSegments());
    assertFalse(translation.getSegments().isEmpty());
}
// Expected transcription text for the batman.wav test asset; the audio
// transcription/translation tests compare service output against this constant.
protected static final String BATMAN_TRANSCRIPTION =
    "Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
    + "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
    + "regarded as one of the world's greatest detectives, if not the world's greatest "
    + "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
    + " the greatest martial artists in the DC universe, and having peak human physical "
    + "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
    + "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
    + "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
    + "him to access advanced technology. As a proficient scientist, he is able to use and modify "
    + "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
    + "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
    + "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
    + "to be one of the most brilliant minds on the planet. Batman has the ability to function "
    + "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
    + "and an expert in espionage, often gathering information under different identities. "
    + "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
    + "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} |
// Review note: since the request body is form data, consider using RecordWithoutRequestBody.
private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=REDACTED", TestProxySanitizerType.HEADER)
));
} | new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", | private void addTestRecordCustomSanitizers() {
interceptorManager.addSanitizers(Arrays.asList(
new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https:
new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})",
"multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER)
));
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
}
/**
 * Resolves the Azure Cognitive Search API key for the current test mode:
 * a fake key in PLAYBACK, otherwise the {@code ACS_BYOD_API_KEY} environment
 * value, failing fast when it is missing.
 *
 * @return the API key to use for Cognitive Search "bring your own data" tests
 * @throws IllegalStateException when no key is configured outside PLAYBACK mode
 */
protected String getAzureCognitiveSearchKey() {
    String configuredKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
    if (getTestMode() == TestMode.PLAYBACK) {
        return FAKE_API_KEY;
    }
    if (configuredKey != null) {
        return configuredKey;
    }
    throw new IllegalStateException(
        "No Azure Cognitive Search API key found. "
            + "Please set the appropriate environment variable to use this value.");
}
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
/**
 * Builds the canonical pirate-assistant conversation used by the chat
 * completion test runners.
 *
 * @return a mutable list of chat messages forming the test conversation
 */
private List<ChatMessage> getChatMessages() {
    List<ChatMessage> conversation = new ArrayList<>();
    conversation.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
    conversation.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
    conversation.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
    conversation.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
    return conversation;
}
private ChatCompletionsOptions getChatMessagesWithFunction() {
FunctionDefinition functionDefinition = new FunctionDefinition("MyFunction");
Parameters parameters = new Parameters();
functionDefinition.setParameters(parameters);
List<FunctionDefinition> functions = Arrays.asList(functionDefinition);
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));
ChatCompletionsOptions chatCompletionOptions = new ChatCompletionsOptions(chatMessages);
chatCompletionOptions.setFunctions(functions);
return chatCompletionOptions;
}
/**
 * Resolves a test resource by name under {@code src/test/resources}.
 *
 * @param fileName name of the resource file
 * @return relative path to the resource file
 */
static Path openTestResourceFile(String fileName) {
    String relativePath = "src/test/resources/" + fileName;
    return Paths.get(relativePath);
}
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertFalse(contentFilterResults.getHate().isFiltered());
assertEquals(contentFilterResults.getHate().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSexual().isFiltered());
assertEquals(contentFilterResults.getSexual().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getSelfHarm().isFiltered());
assertEquals(contentFilterResults.getSelfHarm().getSeverity(), ContentFilterSeverity.SAFE);
assertFalse(contentFilterResults.getViolence().isFiltered());
assertEquals(contentFilterResults.getViolence().getSeverity(), ContentFilterSeverity.SAFE);
}
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
assertTrue(chatCompletions.toArray().length > 1);
for (int i = 0; i < chatCompletions.size(); i++) {
ChatCompletions chatCompletion = chatCompletions.get(i);
List<ChatChoice> choices = chatCompletion.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
if (i == 0) {
AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
} else if (i == 1) {
assertNull(choices.get(0).getDelta().getContext());
assertEquals(choices.get(0).getDelta().getRole(), ChatRole.ASSISTANT);
} else if (i == chatCompletions.size() - 1) {
assertEquals(choices.get(0).getFinishReason(), CompletionsFinishReason.STOPPED);
} else {
assertNotNull(choices.get(0).getDelta().getContent());
}
}
}
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} | class OpenAIClientTestBase extends TestProxyTestBase {
OpenAIClientBuilder getOpenAIClientBuilder(HttpClient httpClient, OpenAIServiceVersion serviceVersion) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
.endpoint("https:
.credential(new AzureKeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
} else {
builder
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT"))
.credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY")));
}
return builder;
}
OpenAIClientBuilder getNonAzureOpenAIClientBuilder(HttpClient httpClient) {
OpenAIClientBuilder builder = new OpenAIClientBuilder()
.httpClient(httpClient);
if (getTestMode() != TestMode.LIVE) {
addTestRecordCustomSanitizers();
}
if (getTestMode() == TestMode.PLAYBACK) {
builder
.credential(new KeyCredential(FAKE_API_KEY));
} else if (getTestMode() == TestMode.RECORD) {
builder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
} else {
builder
.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY")));
}
return builder;
}
protected String getAzureCognitiveSearchKey() {
String azureCognitiveSearchKey = Configuration.getGlobalConfiguration().get("ACS_BYOD_API_KEY");
if (getTestMode() == TestMode.PLAYBACK) {
return FAKE_API_KEY;
} else if (azureCognitiveSearchKey != null) {
return azureCognitiveSearchKey;
} else {
throw new IllegalStateException(
"No Azure Cognitive Search API key found. "
+ "Please set the appropriate environment variable to use this value.");
}
}
@Test
public abstract void testGetCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletions(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetChatCompletionsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddings(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
@Test
public abstract void testGetEmbeddingsWithResponse(HttpClient httpClient, OpenAIServiceVersion serviceVersion);
void getCompletionsRunner(BiConsumer<String, List<String>> testRunner) {
String deploymentId = "text-davinci-003";
List<String> prompt = new ArrayList<>();
prompt.add("Say this is a test");
testRunner.accept(deploymentId, prompt);
}
void getCompletionsFromSinglePromptRunner(BiConsumer<String, String> testRunner) {
String deploymentId = "text-davinci-003";
String prompt = "Say this is a test";
testRunner.accept(deploymentId, prompt);
}
void getChatCompletionsRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-35-turbo", getChatMessages());
}
void getChatCompletionsForNonAzureRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo", getChatMessages());
}
void getChatCompletionsAzureChatSearchRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(
Arrays.asList(new ChatMessage(ChatRole.USER, "What does PR complete mean?")));
testRunner.accept("gpt-4-0613", chatCompletionsOptions);
}
void getEmbeddingRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getEmbeddingNonAzureRunner(BiConsumer<String, EmbeddingsOptions> testRunner) {
testRunner.accept("text-embedding-ada-002", new EmbeddingsOptions(Arrays.asList("Your text string goes here")));
}
void getImageGenerationRunner(Consumer<ImageGenerationOptions> testRunner) {
testRunner.accept(
new ImageGenerationOptions("A drawing of the Seattle skyline in the style of Van Gogh")
);
}
void getChatFunctionForNonAzureRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessagesWithFunction());
}
void getChatFunctionForRunner(BiConsumer<String, ChatCompletionsOptions> testRunner) {
testRunner.accept("gpt-4-0613", getChatMessagesWithFunction());
}
void getChatCompletionsContentFilterRunner(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-4", getChatMessages());
}
void getCompletionsContentFilterRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-003", "What is 3 times 4?");
}
void getChatCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, List<ChatMessage>> testRunner) {
testRunner.accept("gpt-3.5-turbo-0613", getChatMessages());
}
void getCompletionsContentFilterRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("text-davinci-002", "What is 3 times 4?");
}
void getAudioTranscriptionRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "batman.wav");
}
void getAudioTranslationRunner(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-deployment", "JP_it_is_rainy_today.wav");
}
void getAudioTranscriptionRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "batman.wav");
}
void getAudioTranslationRunnerForNonAzure(BiConsumer<String, String> testRunner) {
testRunner.accept("whisper-1", "JP_it_is_rainy_today.wav");
}
private List<ChatMessage> getChatMessages() {
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "You are a helpful assistant. You will talk like a pirate."));
chatMessages.add(new ChatMessage(ChatRole.USER, "Can you help me?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "Of course, me hearty! What can I do for ye?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "What's the best way to train a parrot?"));
return chatMessages;
}
/**
 * Builds a {@code ChatCompletionsOptions} carrying a single user message and one callable
 * function definition named "MyFunction", mirroring the function-calling test scenario.
 */
private ChatCompletionsOptions getChatMessagesWithFunction() {
    FunctionDefinition myFunction = new FunctionDefinition("MyFunction");
    myFunction.setParameters(new Parameters());

    List<ChatMessage> messages = new ArrayList<>();
    messages.add(new ChatMessage(ChatRole.USER, "What's the weather like in San Francisco in Celsius?"));

    ChatCompletionsOptions options = new ChatCompletionsOptions(messages);
    options.setFunctions(Arrays.asList(myFunction));
    return options;
}
// Resolves a file under the module's test resources directory (relative to the module root).
static Path openTestResourceFile(String fileName) {
return Paths.get("src/test/resources/" + fileName);
}
// Convenience overload: most tests expect the default "stop" finish reason.
static void assertCompletions(int choicesPerPrompt, Completions actual) {
assertCompletions(choicesPerPrompt, "stop", actual);
}
// Verifies a Completions payload: non-null, expected choice count and finish reason, plus usage stats.
static void assertCompletions(int choicesPerPrompt, String expectedFinishReason, Completions actual) {
assertNotNull(actual);
assertInstanceOf(Completions.class, actual);
assertChoices(choicesPerPrompt, expectedFinishReason, actual.getChoices());
assertNotNull(actual.getUsage());
}
// Asserts the expected HTTP status code, deserializes the BinaryData body into the given
// type, and returns the decoded object for further assertions.
static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, Class<T> clazz, int expectedCode) {
assertNotNull(actualResponse);
assertEquals(expectedCode, actualResponse.getStatusCode());
// NOTE(review): this instanceof check is redundant — the parameter is already typed Response.
assertInstanceOf(Response.class, actualResponse);
BinaryData binaryData = actualResponse.getValue();
assertNotNull(binaryData);
T object = binaryData.toObject(clazz);
assertNotNull(object);
assertInstanceOf(clazz, object);
return object;
}
// Verifies the number of completion choices and validates each one in positional order.
static void assertChoices(int choicesPerPrompt, String expectedFinishReason, List<Choice> actual) {
assertEquals(choicesPerPrompt, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChoice(i, expectedFinishReason, actual.get(i));
}
}
// A single completion choice must carry text, its positional index, and the expected finish reason.
static void assertChoice(int index, String expectedFinishReason, Choice actual) {
assertNotNull(actual.getText());
assertEquals(index, actual.getIndex());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// Validates a chat completion with the default "stop" finish reason and ASSISTANT role.
static void assertChatCompletions(int choiceCount, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
// NOTE(review): partially redundant — assertChatChoices below already checks the exact count.
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, "stop", ChatRole.ASSISTANT, choices);
assertNotNull(actual.getUsage());
}
// Validates one streamed chat-completions chunk. Chunks without an id are skipped
// (presumably prologue/placeholder events — confirm against the service's streaming contract).
static void assertChatCompletionsStream(ChatCompletions chatCompletions) {
if (chatCompletions.getId() != null && !chatCompletions.getId().isEmpty()) {
// NOTE(review): the id assert is redundant under the guard above.
assertNotNull(chatCompletions.getId());
assertNotNull(chatCompletions.getChoices());
assertFalse(chatCompletions.getChoices().isEmpty());
assertNotNull(chatCompletions.getChoices().get(0).getDelta());
}
}
// Validates one streamed completions chunk; chunks without an id are skipped
// (presumably prologue/placeholder events — confirm against the service's streaming contract).
static void assertCompletionsStream(Completions completions) {
if (completions.getId() != null && !completions.getId().isEmpty()) {
// NOTE(review): the id assert is redundant under the guard above.
assertNotNull(completions.getId());
assertNotNull(completions.getChoices());
assertFalse(completions.getChoices().isEmpty());
assertNotNull(completions.getChoices().get(0).getText());
}
}
// Overload allowing the expected finish reason and chat role to be specified explicitly.
static void assertChatCompletions(int choiceCount, String expectedFinishReason, ChatRole chatRole, ChatCompletions actual) {
List<ChatChoice> choices = actual.getChoices();
assertNotNull(choices);
// NOTE(review): partially redundant — assertChatChoices below already checks the exact count.
assertTrue(choices.size() > 0);
assertChatChoices(choiceCount, expectedFinishReason, chatRole, choices);
assertNotNull(actual.getUsage());
}
// Verifies the number of chat choices and validates each one in positional order.
static void assertChatChoices(int choiceCount, String expectedFinishReason, ChatRole chatRole, List<ChatChoice> actual) {
assertEquals(choiceCount, actual.size());
for (int i = 0; i < actual.size(); i++) {
assertChatChoice(i, expectedFinishReason, chatRole, actual.get(i));
}
}
// Checks index, role, non-null content, and finish reason of one chat choice.
static void assertChatChoice(int index, String expectedFinishReason, ChatRole chatRole, ChatChoice actual) {
assertEquals(index, actual.getIndex());
assertEquals(chatRole, actual.getMessage().getRole());
assertNotNull(actual.getMessage().getContent());
assertEquals(expectedFinishReason, actual.getFinishReason().toString());
}
// An embeddings payload must contain at least one item, each with a non-empty vector,
// plus token usage information.
static void assertEmbeddings(Embeddings actual) {
List<EmbeddingItem> data = actual.getData();
assertNotNull(data);
assertTrue(data.size() > 0);
for (EmbeddingItem item : data) {
List<Double> embedding = item.getEmbedding();
assertNotNull(embedding);
assertTrue(embedding.size() > 0);
}
assertNotNull(actual.getUsage());
}
// An image-generation response must contain at least one generated image entry.
static void assertImageResponse(ImageResponse actual) {
assertNotNull(actual.getData());
assertFalse(actual.getData().isEmpty());
}
// Asserts the chat choice terminated with a call to the expected function, then
// deserializes the call's JSON arguments into the supplied type and returns them.
static <T> T assertFunctionCall(ChatChoice actual, String functionName, Class<T> myPropertiesClazz) {
assertEquals(0, actual.getIndex());
assertEquals("function_call", actual.getFinishReason().toString());
FunctionCall functionCall = actual.getMessage().getFunctionCall();
assertEquals(functionName, functionCall.getName());
BinaryData argumentJson = BinaryData.fromString(functionCall.getArguments());
return argumentJson.toObject(myPropertiesClazz);
}
/**
 * Asserts that every content-filter category (hate, sexual, self-harm, violence)
 * reports an unfiltered result with SAFE severity.
 */
static void assertSafeContentFilterResults(ContentFilterResults contentFilterResults) {
    assertNotNull(contentFilterResults);
    // JUnit's assertEquals takes (expected, actual); keeping that order yields
    // correct failure messages ("expected: SAFE but was: ...").
    assertFalse(contentFilterResults.getHate().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getHate().getSeverity());
    assertFalse(contentFilterResults.getSexual().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSexual().getSeverity());
    assertFalse(contentFilterResults.getSelfHarm().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getSelfHarm().getSeverity());
    assertFalse(contentFilterResults.getViolence().isFiltered());
    assertEquals(ContentFilterSeverity.SAFE, contentFilterResults.getViolence().getSeverity());
}
// For responses without content-filter annotations, all four category results are absent.
static void assertEmptyContentFilterResults(ContentFilterResults contentFilterResults) {
assertNotNull(contentFilterResults);
assertNull(contentFilterResults.getHate());
assertNull(contentFilterResults.getSexual());
assertNull(contentFilterResults.getViolence());
assertNull(contentFilterResults.getSelfHarm());
}
// Non-streaming "bring your own data" (Cognitive Search extension) validation: the
// message context must include a TOOL message whose content embeds the retrieved citations.
static void assertChatCompletionsCognitiveSearch(ChatCompletions chatCompletions) {
List<ChatChoice> choices = chatCompletions.getChoices();
assertNotNull(choices);
assertTrue(choices.size() > 0);
assertChatChoices(1, CompletionsFinishReason.STOPPED.toString(), ChatRole.ASSISTANT, choices);
AzureChatExtensionsMessageContext messageContext = choices.get(0).getMessage().getContext();
assertNotNull(messageContext);
assertNotNull(messageContext.getMessages());
ChatMessage firstMessage = messageContext.getMessages().get(0);
assertNotNull(firstMessage);
assertEquals(ChatRole.TOOL, firstMessage.getRole());
assertFalse(firstMessage.getContent().isEmpty());
assertTrue(firstMessage.getContent().contains("citations"));
}
/**
 * Validates a streamed "bring your own data" (Cognitive Search) chat completion: the first
 * chunk carries the tool/citation extension context, the second carries the assistant role
 * announcement, the last carries the STOPPED finish reason, and every chunk in between
 * carries incremental content.
 */
static void assertChatCompletionsStreamingCognitiveSearch(Stream<ChatCompletions> chatCompletionsStream) {
    List<ChatCompletions> chatCompletions = chatCompletionsStream.collect(Collectors.toList());
    // size() avoids materializing a throwaway array just to read its length.
    assertTrue(chatCompletions.size() > 1);
    for (int i = 0; i < chatCompletions.size(); i++) {
        ChatCompletions chatCompletion = chatCompletions.get(i);
        List<ChatChoice> choices = chatCompletion.getChoices();
        assertNotNull(choices);
        assertFalse(choices.isEmpty());
        if (i == 0) {
            // First chunk: extension context with the retrieved TOOL message and citations.
            AzureChatExtensionsMessageContext messageContext = choices.get(0).getDelta().getContext();
            assertNotNull(messageContext);
            assertNotNull(messageContext.getMessages());
            ChatMessage firstMessage = messageContext.getMessages().get(0);
            assertNotNull(firstMessage);
            assertEquals(ChatRole.TOOL, firstMessage.getRole());
            assertFalse(firstMessage.getContent().isEmpty());
            assertTrue(firstMessage.getContent().contains("citations"));
        } else if (i == 1) {
            // Second chunk: assistant role announcement with no extension context.
            // assertEquals(expected, actual) — expected value goes first.
            assertNull(choices.get(0).getDelta().getContext());
            assertEquals(ChatRole.ASSISTANT, choices.get(0).getDelta().getRole());
        } else if (i == chatCompletions.size() - 1) {
            // Final chunk: terminal finish reason.
            assertEquals(CompletionsFinishReason.STOPPED, choices.get(0).getFinishReason());
        } else {
            // Middle chunks: incremental content tokens.
            assertNotNull(choices.get(0).getDelta().getContent());
        }
    }
}
// The simple JSON transcription format carries only the text; the verbose-only
// fields (duration, language, task, segments) must be absent.
static void assertAudioTranscriptionSimpleJson(AudioTranscription transcription, String expectedText) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNull(transcription.getDuration());
assertNull(transcription.getLanguage());
assertNull(transcription.getTask());
assertNull(transcription.getSegments());
}
// The verbose JSON transcription format additionally carries duration, language,
// the task label, and a non-empty list of segments.
static void assertAudioTranscriptionVerboseJson(AudioTranscription transcription, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(transcription);
assertEquals(expectedText, transcription.getText());
assertNotNull(transcription.getDuration());
assertNotNull(transcription.getLanguage());
assertEquals(audioTaskLabel, transcription.getTask());
assertNotNull(transcription.getSegments());
assertFalse(transcription.getSegments().isEmpty());
}
// The simple JSON translation format carries only the text; verbose-only fields must be absent.
static void assertAudioTranslationSimpleJson(AudioTranslation translation, String expectedText) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNull(translation.getDuration());
assertNull(translation.getLanguage());
assertNull(translation.getTask());
assertNull(translation.getSegments());
}
// The verbose JSON translation format additionally carries duration, language,
// the task label, and a non-empty list of segments.
static void assertAudioTranslationVerboseJson(AudioTranslation translation, String expectedText, AudioTaskLabel audioTaskLabel) {
assertNotNull(translation);
assertEquals(expectedText, translation.getText());
assertNotNull(translation.getDuration());
assertNotNull(translation.getLanguage());
assertEquals(audioTaskLabel, translation.getTask());
assertNotNull(translation.getSegments());
assertFalse(translation.getSegments().isEmpty());
}
// Expected transcription of the batman.wav test asset; compared against the
// service output by the audio transcription tests.
protected static final String BATMAN_TRANSCRIPTION =
"Skills and Abilities. Batman has no inherent superpowers. He relies on his own "
+ "scientific knowledge, detective skills, and athletic prowess. In the stories, Batman is "
+ "regarded as one of the world's greatest detectives, if not the world's greatest "
+ "crime solver. Batman has been repeatedly described as having genius-level intellect, one of"
+ " the greatest martial artists in the DC universe, and having peak human physical "
+ "conditioning. He has traveled the world acquiring the skills needed to aid his crusade "
+ "against crime. His knowledge and expertise in almost every discipline known to man is nearly "
+ "unparalleled by any other character in the universe. Batman's inexhaustible wealth allows "
+ "him to access advanced technology. As a proficient scientist, he is able to use and modify "
+ "those technologies to his advantage. Batman describes Superman as the most dangerous man on "
+ "earth, able to defeat a team of super-powered extraterrestrials by himself in order to "
+ "rescue his imprisoned teammates in the first storyline. Superman also considers Batman "
+ "to be one of the most brilliant minds on the planet. Batman has the ability to function "
+ "under great physical pain and withstand mind control. He is a master of disguise, multilingual, "
+ "and an expert in espionage, often gathering information under different identities. "
+ "Batman's karate, judo, and jujitsu training has made him a master of stealth and escape, "
+ "allowing him to appear and disappear at will, and to break free from the chains of his past.";
} |
do we need this if/else? | private T doCreate(K key, @Nullable P properties) {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    final String resourceType = getResourceType();
    final String name = getResourceName(key);
    try {
        LOGGER.info("Creating {} with name '{}' ...", resourceType, name);
        // No branching needed: internalCreate(key, properties) already falls back to
        // internalCreate(key) when properties is null (see the default overload).
        return internalCreate(key, properties);
    } catch (ManagementException e) {
        // Wrap the management-plane failure with resource type/name context.
        String message = String.format("Creating %s with name '%s' failed due to: %s", resourceType, name, e.toString());
        throw new RuntimeException(message, e);
    } finally {
        stopWatch.stop();
        // Reuse the cached resourceType local instead of calling getResourceType() again.
        LOGGER.info("Creating {} with name '{}' finished in {} seconds", resourceType, name,
            stopWatch.getTotalTimeMillis() / 1000);
    }
} | } | private T doCreate(K key, @Nullable P properties) {
StopWatch stopWatch = new StopWatch();
stopWatch.start();
final String resourceType = getResourceType();
final String name = getResourceName(key);
try {
LOGGER.info("Creating {} with name '{}' ...", resourceType, name);
return internalCreate(key, properties);
} catch (ManagementException e) {
String message = String.format("Creating %s with name '%s' failed due to: %s", resourceType, name, e.toString());
throw new RuntimeException(message, e);
} finally {
stopWatch.stop();
LOGGER.info("Creating {} with name '{}' finished in {} seconds", getResourceType(), name,
stopWatch.getTotalTimeMillis() / 1000);
}
} | class AbstractResourceCrud<T, K, P> implements ResourceCrud<T, K, P> {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractResourceCrud.class);
static final int RESOURCE_NOT_FOUND = 404;
protected final AzureResourceManager resourceManager;
protected final AzureResourceMetadata resourceMetadata;
/**
* Creates a new instance of {@link AbstractResourceCrud}.
*
* @param resourceManager The Azure resource manager.
* @param resourceMetadata The Azure resource metadata.
*/
protected AbstractResourceCrud(@NonNull AzureResourceManager resourceManager,
@NonNull AzureResourceMetadata resourceMetadata) {
// Plain aggregation; nullability is declared via @NonNull (runtime enforcement
// depends on the annotation provider — presumably Lombok, verify).
this.resourceManager = resourceManager;
this.resourceMetadata = resourceMetadata;
}
// Existence is defined as a successful (non-null) fetch of the resource.
@Override
public boolean exists(K key) {
    if (get(key) != null) {
        return true;
    }
    LOGGER.debug("{} '{}' does not exist.", getResourceType(), getResourceName(key));
    return false;
}
// Fetches the resource, timing the call and logging its duration regardless of outcome.
@Override
public T get(K key) {
StopWatch stopWatch = new StopWatch();
stopWatch.start();
final String resourceType = getResourceType();
final String name = getResourceName(key);
try {
LOGGER.info("Fetching {} with name '{}' ...", resourceType, name);
return internalGet(key);
} catch (ManagementException e) {
// Wrap the management-plane failure with resource type/name context.
String message = String.format("Fetching %s with name '%s' failed due to: %s", resourceType, name, e.toString());
throw new RuntimeException(message, e);
} finally {
stopWatch.stop();
// Integer division: sub-second fetches are logged as 0 seconds.
LOGGER.info("Fetching {} with name '{}' finished in {} seconds", resourceType, name,
stopWatch.getTotalTimeMillis() / 1000);
}
}
// Creates the resource with no additional creation properties.
@Override
public T create(K key) {
return doCreate(key, null);
}
// Creates the resource using the supplied creation properties.
@Override
public T create(K key, P properties) {
return doCreate(key, properties);
}
// Fetch-or-create with no additional creation properties.
@Override
public T getOrCreate(K key) {
return doGetOrCreate(key, null);
}
// Fetch-or-create using the supplied creation properties.
@Override
public T getOrCreate(K key, P properties) {
return doGetOrCreate(key, properties);
}
// Fetch first; fall back to creation only when the resource is absent.
private T doGetOrCreate(K key, @Nullable P properties) {
    final T existing = get(key);
    return existing != null ? existing : create(key, properties);
}
// Hooks implemented by concrete per-resource CRUD classes.
abstract String getResourceName(K key);
abstract String getResourceType();
abstract T internalGet(K key);
abstract T internalCreate(K key);
// Default: property-aware creation falls back to the property-less variant, so
// subclasses that take no creation properties need not override this overload.
T internalCreate(K key, @Nullable P properties) {
return internalCreate(key);
}
} | class AbstractResourceCrud<T, K, P> implements ResourceCrud<T, K, P> {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractResourceCrud.class);
static final int RESOURCE_NOT_FOUND = 404;
protected final AzureResourceManager resourceManager;
protected final AzureResourceMetadata resourceMetadata;
/**
* Creates a new instance of {@link AbstractResourceCrud}.
*
* @param resourceManager The Azure resource manager.
* @param resourceMetadata The Azure resource metadata.
*/
protected AbstractResourceCrud(@NonNull AzureResourceManager resourceManager,
@NonNull AzureResourceMetadata resourceMetadata) {
this.resourceManager = resourceManager;
this.resourceMetadata = resourceMetadata;
}
@Override
public boolean exists(K key) {
boolean exists = get(key) != null;
if (!exists) {
LOGGER.debug("{} '{}' does not exist.", getResourceType(), getResourceName(key));
}
return exists;
}
@Override
public T get(K key) {
StopWatch stopWatch = new StopWatch();
stopWatch.start();
final String resourceType = getResourceType();
final String name = getResourceName(key);
try {
LOGGER.info("Fetching {} with name '{}' ...", resourceType, name);
return internalGet(key);
} catch (ManagementException e) {
String message = String.format("Fetching %s with name '%s' failed due to: %s", resourceType, name, e.toString());
throw new RuntimeException(message, e);
} finally {
stopWatch.stop();
LOGGER.info("Fetching {} with name '{}' finished in {} seconds", resourceType, name,
stopWatch.getTotalTimeMillis() / 1000);
}
}
@Override
public T create(K key) {
return doCreate(key, null);
}
@Override
public T create(K key, P properties) {
return doCreate(key, properties);
}
@Override
public T getOrCreate(K key) {
return doGetOrCreate(key, null);
}
@Override
public T getOrCreate(K key, P properties) {
return doGetOrCreate(key, properties);
}
private T doGetOrCreate(K key, @Nullable P properties) {
T result = get(key);
if (result != null) {
return result;
}
return doCreate(key, properties);
}
abstract String getResourceName(K key);
abstract String getResourceType();
abstract T internalGet(K key);
abstract T internalCreate(K key);
T internalCreate(K key, @Nullable P properties) {
return internalCreate(key);
}
} |
I think the assumption here when deep-copy-disabled:true is - 1. the Flux elements are processed in the emitting thread, and 2. there won’t be any operator later in the chain with (implicit or explicit) buffering that causes this _using()_ to run cleanup before the consumption of that buffer. Is this correct? Just thinking out loud, since doFinally used to keep the Response open until the full chain terminates — wondering about any impact from the new change when deep-copy-disabled:true. | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | NettyAsyncHttpResponse::close); | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
// When true, body ByteBufs are exposed via nioBuffer() without a deep copy — valid
// only while the underlying buffer is alive; confirm consumers do not retain them.
this.disableBufferCopy = disableBufferCopy;
}
// Aggregates the full response body into a byte[]; Mono.using ties the connection
// close to the termination of the using scope.
// Fix: the duplicated @Override annotation (an extraction artifact) is removed —
// @Override is not a repeatable annotation, so repeating it is a compile error.
@Override
public Mono<byte[]> getBodyAsByteArray() {
    return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
        NettyAsyncHttpResponse::close);
}
// Decodes the aggregated body using BOM detection, falling back to the Content-Type charset.
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
// Decodes the aggregated body with an explicit charset; the connection is closed when
// the using scope terminates.
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
// Buffers the whole body and exposes it as an InputStream.
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
// Streams the body into the asynchronous channel. Note the blocking .get() on each
// write future inside the subscriber callback — presumably acceptable on the
// subscribing thread; verify it cannot run on a Netty event-loop thread.
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
// Synchronous variant: subscribes on boundedElastic and blocks the caller until the
// body has been fully written to the channel.
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
// Releases the underlying reactor-netty connection (close vs. return-to-pool semantics
// are decided by closeConnection, defined elsewhere).
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
// Raw inbound byte stream from the reactor-netty connection.
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
// Exposes the underlying connection for internal/package use.
public Connection internConnection() {
return reactorNettyConnection;
}
// Parses the Content-Length header, returning null when the header is missing or malformed.
private Long getContentLength() {
    String rawContentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    if (rawContentLength == null) {
        return null;
    }
    try {
        return Long.valueOf(rawContentLength);
    } catch (NumberFormatException ignored) {
        // Malformed header: treat the length as unknown.
        return null;
    }
}
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} |
Shouldn't the `Flux.using` result in effectively the same lifetime for the response? In both cases the response would be closed shortly after a terminal event from the `response.bodyIntern` stream. | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | NettyAsyncHttpResponse::close); | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} |
I was thinking about the following use case (if it's ever valid when deep-copy-disabled:true). Like shown in the output the release of response (owning the buffer?) happens early in case of `using`, so I was wondering if this is valid execution in our Netty case with deep-copy-disabled:true, if so, could this lead to data loss / ccorruption. ### Cleanup with doFinally operator ```java @Test public void cleanupViaDoFinally() throws InterruptedException { Flux<Integer> nioFlux = Flux.range(1, 10) .publishOn(Schedulers.boundedElastic()); // mimic emission of nio buffer in netty-io thread Flux<Integer> nioFluxWithCleanup = nioFlux .doFinally(s -> { System.out.println("running 'doFinally' to close-response."); }); CountDownLatch latch = new CountDownLatch(1); nioFluxWithCleanup .collectList() // mimic buffering of nio buffers .subscribe(value -> { try { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException e) { throw new RuntimeException(e); } System.out.println("nio-buffers:" + value); }, e -> { System.out.println("DownStream: Got Error"); latch.countDown(); }, () -> { System.out.println("DownStream: Got Completion"); latch.countDown(); }); latch.await(); } ``` Here the output is: > nio-buffers:[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] DownStream: Got Completion running 'doFinally' to close-response. 
### Cleanup with using operator ```java @Test public void cleanupViaUsing() throws InterruptedException { Flux<Integer> nioFlux = Flux.range(1, 10) .publishOn(Schedulers.boundedElastic()); // mimic emission of nio buffer in netty-io thread Flux<Integer> nioFluxWithCleanup = Flux.using(() -> nioFlux, s -> s, s -> { System.out.println("running 'using-cleanup' to close-response.");}); CountDownLatch latch = new CountDownLatch(1); nioFluxWithCleanup .collectList()// mimic buffering of nio buffers .subscribe(value -> { try { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException e) { throw new RuntimeException(e); } System.out.println("nio-buffers:" + value); }, e -> { System.out.println("DownStream: Got Error"); latch.countDown(); }, () -> { System.out.println("DownStream: Got Completion"); latch.countDown(); }); latch.await(); } ``` Here the output is: > running 'using-cleanup' to close-response. nio-buffers:[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] DownStream: Got Completion | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | NettyAsyncHttpResponse::close); | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
    // Aggregate the network body and decode it with the caller-supplied charset,
    // closing the connection once the Mono terminates or is cancelled.
    return Mono.using(() -> this,
        response -> response.bodyIntern().aggregate().asString(charset),
        response -> response.close());
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
    // Buffer the full body and expose it as an InputStream; the connection is
    // closed when the Mono terminates or is cancelled.
    return Mono.using(() -> this,
        response -> response.bodyIntern().aggregate().asInputStream(),
        response -> response.close());
}
@Override
// Streams the response body into the given asynchronous channel. The parsed
// Content-Length (may be null) is handed to the subscriber; the connection is
// closed once the returned Mono terminates or is cancelled.
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
// NOTE(review): channel.write(byteBuffer).get() blocks on the write Future inside
// the subscriber callback — confirm this never runs on a Netty event-loop thread.
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
// Blocking variant: streams the body into the given WritableByteChannel.
// subscribeOn(boundedElastic) moves the work off the calling thread so the
// terminal block() does not execute body handling on a non-blocking thread.
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
// Closes the response by releasing the underlying Reactor Netty connection.
public void close() {
closeConnection(reactorNettyConnection);
}
// Returns the raw inbound byte stream of the underlying Reactor Netty connection.
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
// Exposes the underlying Reactor Netty connection for internal callers.
public Connection internConnection() {
return reactorNettyConnection;
}
// Parses the Content-Length header, returning null when the header is absent
// or is not a valid long value.
private Long getContentLength() {
    String headerValue = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    try {
        return (headerValue == null) ? null : Long.valueOf(headerValue);
    } catch (NumberFormatException ignored) {
        // Malformed header: treat the length as unknown.
        return null;
    }
}
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} |
I think this should be fine as `collectList` will collect all emissions from the `nioFlux` before the resource cleanup triggers. Putting that into perspective of the API in azure-core-http-netty, with buffering true or false either way the underlying network response will be consumed without ability for it to be replayed. When buffering the read data will be made durable by being brought into heap memory. | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | NettyAsyncHttpResponse::close); | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} |
Sounds great — in that case this is safe. Thanks! | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | NettyAsyncHttpResponse::close); | public Flux<ByteBuffer> getBody() {
return Flux.using(() -> this, response -> response.bodyIntern()
.map(byteBuf -> this.disableBufferCopy ? byteBuf.nioBuffer() : deepCopyBuffer(byteBuf)),
NettyAsyncHttpResponse::close);
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} | class NettyAsyncHttpResponse extends NettyAsyncHttpResponseBase {
private final Connection reactorNettyConnection;
private final boolean disableBufferCopy;
public NettyAsyncHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection,
HttpRequest httpRequest, boolean disableBufferCopy, boolean headersEagerlyConverted) {
super(reactorNettyResponse, httpRequest, headersEagerlyConverted);
this.reactorNettyConnection = reactorNettyConnection;
this.disableBufferCopy = disableBufferCopy;
}
@Override
@Override
public Mono<byte[]> getBodyAsByteArray() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asByteArray(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes -> CoreUtils.bomAwareToString(bytes,
getHeaderValue(HttpHeaderName.CONTENT_TYPE)));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asString(charset),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return Mono.using(() -> this, response -> response.bodyIntern().aggregate().asInputStream(),
NettyAsyncHttpResponse::close);
}
@Override
public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) {
Long length = getContentLength();
return Mono.using(() -> this, response -> Mono.create(sink -> response.bodyIntern()
.subscribe(new ByteBufWriteSubscriber(byteBuffer -> channel.write(byteBuffer).get(), sink, length))),
NettyAsyncHttpResponse::close);
}
@Override
public void writeBodyTo(WritableByteChannel channel) {
Mono.using(() -> this, response -> Mono.<Void>create(sink -> response.bodyIntern().subscribe(
new ByteBufWriteSubscriber(channel::write, sink, getContentLength())))
.subscribeOn(Schedulers.boundedElastic()), NettyAsyncHttpResponse::close)
.block();
}
@Override
public void close() {
closeConnection(reactorNettyConnection);
}
private ByteBufFlux bodyIntern() {
return reactorNettyConnection.inbound().receive();
}
public Connection internConnection() {
return reactorNettyConnection;
}
private Long getContentLength() {
String contentLength = getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLength == null) {
return null;
}
try {
return Long.parseLong(contentLength);
} catch (NumberFormatException ex) {
return null;
}
}
} |
Should we consider using `publishOn` with the `boundedElastic` scheduler? | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | }).block(); | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
 * Creates an instance of {@link FluxByteBufferContent} with an unknown ({@code null}) length.
 * @param content The content for this instance.
 * @throws NullPointerException if {@code content} is null.
 */
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
 * Creates an instance of {@link FluxByteBufferContent} that is marked non-replayable.
 * @param content The content for this instance.
 * @param length The length of the content in bytes, or {@code null} when unknown.
 * @throws NullPointerException if {@code content} is null.
 */
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
 * Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
 *
 * <p>The {@code content} Flux is stored by reference; it is neither subscribed to nor
 * copied here.
 *
 * @param content The content for this instance.
 * @param length The length of the content in bytes, or {@code null} when unknown.
 * @param isReplayable Whether the content is replayable.
 * @throws NullPointerException if {@code content} is null.
 */
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
    // Prefer the length of already-collected bytes; otherwise fall back to the
    // length supplied at construction (which may be null).
    byte[] collected = BYTES_UPDATER.get(this);
    return (collected != null) ? Long.valueOf(collected.length) : length;
}
@Override
// Decodes the full content as UTF-8 text; forces collection of the whole stream
// into memory via toBytes().
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
    // Lazily collect the stream into a byte array and cache it on the instance;
    // subsequent calls return the cached array.
    return BYTES_UPDATER.updateAndGet(this, existing -> (existing == null) ? getBytes() : existing);
}
@Override
// Deserializes the fully collected bytes into the requested type using the
// provided serializer.
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
// Exposes the collected bytes as an in-memory stream; the whole content is
// buffered on the heap first.
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
// Wraps the cached byte array; returned read-only so callers cannot mutate the
// shared cache through the buffer.
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
// Returns the backing Flux as-is, without buffering or copying.
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
// Reports the replayability flag supplied at construction.
public boolean isReplayable() {
return isReplayable;
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
    // (Fixed: the annotation was duplicated, which is a compile error for the
    // non-repeatable @Override.)
    // Already replayable, or previously buffered: reuse without re-reading the stream.
    if (isReplayable) {
        return Mono.just(this);
    }
    FluxByteBufferContent replayableContent = cachedReplayableContent.get();
    if (replayableContent != null) {
        return Mono.just(replayableContent);
    }
    // Deep-copy every buffer into memory, then wrap the buffered data in a
    // replayable content instance. Read-only views keep subscribers from mutating
    // the shared cached buffers.
    return bufferContent().cache().map(bufferedData -> {
        Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
        FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
        cachedReplayableContent.set(bufferedBinaryDataContent);
        return bufferedBinaryDataContent;
    });
}
// Collects the whole stream into a LinkedList of deep copies so the data
// outlives the source buffers.
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
// Deep copy: allocate, copy the remaining bytes, flip so the copy is read-ready.
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
// This content always models raw binary data.
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
// Collects the stream into a single byte array, rejecting contents known up
// front to be too large for a Java array.
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
// NOTE(review): share() multicasts the in-flight collection; presumably to
// coalesce concurrent callers — confirm that is the intent.
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
* Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
*
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @param isReplayable Whether the content is replayable.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return Mono.just(replayableContent);
}
return bufferContent().cache().map(bufferedData -> {
Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
cachedReplayableContent.set(bufferedBinaryDataContent);
return bufferedBinaryDataContent;
});
}
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} |
Maybe, we didn't before so this doesn't change that behavior, so we may want to put that off until next release. The `publishOn` would get the handling of the `Flux` off any non-blocking thread, such as the Netty IO threads, right? And this could help more quickly free up those threads to take on another network request? | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | }).block(); | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
* Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
*
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @param isReplayable Whether the content is replayable.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return Mono.just(replayableContent);
}
return bufferContent().cache().map(bufferedData -> {
Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
cachedReplayableContent.set(bufferedBinaryDataContent);
return bufferedBinaryDataContent;
});
}
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
* Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
*
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @param isReplayable Whether the content is replayable.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return Mono.just(replayableContent);
}
return bufferContent().cache().map(bufferedData -> {
Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
cachedReplayableContent.set(bufferedBinaryDataContent);
return bufferedBinaryDataContent;
});
}
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} |
Given this retains previous behavior, this will be held off on for now and reevaluated later. | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | }).block(); | public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return replayableContent;
}
return bufferContent().map(bufferedData -> {
FluxByteBufferContent bufferedContent = new FluxByteBufferContent(Flux.fromIterable(bufferedData)
.map(ByteBuffer::duplicate), length, true);
cachedReplayableContent.set(bufferedContent);
return bufferedContent;
}).block();
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
* Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
*
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @param isReplayable Whether the content is replayable.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return Mono.just(replayableContent);
}
return bufferContent().cache().map(bufferedData -> {
Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
cachedReplayableContent.set(bufferedBinaryDataContent);
return bufferedBinaryDataContent;
});
}
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} | class FluxByteBufferContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(FluxByteBufferContent.class);
private final Flux<ByteBuffer> content;
private final AtomicReference<FluxByteBufferContent> cachedReplayableContent = new AtomicReference<>();
private final Long length;
private final boolean isReplayable;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<FluxByteBufferContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(FluxByteBufferContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content) {
this(content, null);
}
/**
* Creates an instance of {@link FluxByteBufferContent}.
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length) {
this(content, length, false);
}
/**
* Creates an instance {@link FluxByteBufferContent} where replay-ability is configurable.
*
* @param content The content for this instance.
* @param length The length of the content in bytes.
* @param isReplayable Whether the content is replayable.
* @throws NullPointerException if {@code content} is null.
*/
public FluxByteBufferContent(Flux<ByteBuffer> content, Long length, boolean isReplayable) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
this.length = length;
this.isReplayable = isReplayable;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return new ByteArrayInputStream(toBytes());
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return content;
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
FluxByteBufferContent replayableContent = cachedReplayableContent.get();
if (replayableContent != null) {
return Mono.just(replayableContent);
}
return bufferContent().cache().map(bufferedData -> {
Flux<ByteBuffer> bufferedFluxData = Flux.fromIterable(bufferedData).map(ByteBuffer::asReadOnlyBuffer);
FluxByteBufferContent bufferedBinaryDataContent = new FluxByteBufferContent(bufferedFluxData, length, true);
cachedReplayableContent.set(bufferedBinaryDataContent);
return bufferedBinaryDataContent;
});
}
private Mono<LinkedList<ByteBuffer>> bufferContent() {
return content.map(buffer -> {
ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
copy.put(buffer);
copy.flip();
return copy;
}).collect(LinkedList::new, LinkedList::add);
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private byte[] getBytes() {
if (length != null && length > MAX_ARRAY_SIZE) {
throw LOGGER.logExceptionAsError(new IllegalStateException(TOO_LARGE_FOR_BYTE_ARRAY + length));
}
return FluxUtil.collectBytesInByteBufferStream(content)
.share()
.block();
}
} |
is it ok that `LOG` is statically initialized above already? | public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
} | application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled." | public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
will this be true if `runtime-attach` artifact is (unused) dependency? | private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
} | Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null); | private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
```suggestion return Boolean.parseBoolean(otelSdkDisabled); ``` | private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
} | return "true".equalsIgnoreCase(otelSdkDisabled); | private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
is the native check needed? ```suggestion return !isStarterDisabled(environment) && applicationInsightsAgentIsAttached(); ``` | private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
} | && !isStarterDisabled(environment) && applicationInsightsAgentIsAttached(); | private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
It is only for the native case because OTel and App Insights Java agents are not supported with native images. | private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
} | && !isStarterDisabled(environment) && applicationInsightsAgentIsAttached(); | private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
No, it will be false in this case. The Class.forName is applied for the bootstrap class loader. | private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
} | Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null); | private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
/**
 * Computes the single property to append: either switch the whole OTel SDK off
 * (agent attached) or merely disable the OTLP exporter.
 */
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
boolean disableSdk = starterHasToBeDisabled(environment);
return disableSdk
? Collections.singletonMap("otel.sdk.disabled", true)
: Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
// The starter yields to the Application Insights agent — but only when not running as a
// GraalVM native image and when the user has not already disabled the OTel SDK explicitly.
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
/** True when running inside (or building) a GraalVM native image, detected via its marker property. */
private static boolean isNativeRuntimeExecution() {
return System.getProperty("org.graalvm.nativeimage.imagecode") != null;
}
/** Whether the user explicitly disabled the OTel SDK via {@code otel.sdk.disabled}. */
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
// The lookup defaults to "false", so the value is never null here.
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
// Boolean.parseBoolean performs the same case-insensitive "true" check as the previous
// "true".equalsIgnoreCase(...) — changed for consistency with the sibling copy of this helper.
return Boolean.parseBoolean(otelSdkDisabled);
}
// Near-lowest precedence: run after (almost) all other environment post-processors.
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
/**
 * Appends the starter's extra configuration properties and, when the starter has to be
 * disabled, schedules a warning for when the application context initializes.
 */
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
// Logger is created lazily inside the initializer (not the static LOG field) —
// per review discussion, to keep this logger from being used elsewhere in the code.
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
// Register the computed properties with the lowest precedence so user settings win.
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
}
// Computes the single property to append: disable the whole OTel SDK when the starter
// must step aside, otherwise only switch off the OTLP exporter.
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
/**
 * The starter is disabled only when the Application Insights agent is attached and
 * nothing else already rules the starter out (native image, explicit otel.sdk.disabled).
 */
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
if (isNativeRuntimeExecution()) {
return false;
}
if (isStarterDisabled(environment)) {
return false;
}
return applicationInsightsAgentIsAttached();
}
// GraalVM sets this system property while building or running a native image.
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
/** Whether the user explicitly disabled the OTel SDK ("otel.sdk.disabled" defaults to "false"). */
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
return Boolean.parseBoolean(environment.getProperty("otel.sdk.disabled", "false"));
}
// Near-lowest precedence: run after (almost) all other environment post-processors.
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
The log message is printed this way. I have moved the logger creation into the ApplicationContextInitializer to prevent it from being used elsewhere in the code. | public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property."));
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
} | application.addInitializers(applicationContext -> LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled." | public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (starterHasToBeDisabled(environment)) {
application.addInitializers(applicationContext -> {
Logger log = LoggerFactory.getLogger(PropertiesPostProcessor.class);
log.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
+ " You can remove this message by adding the otel.sdk.disabled=true property.");
});
}
Map<String, Object> newProperties = buildNewProperties(environment);
PropertySource<?> propertySource = new MapPropertySource("newPropertiesForSpringMonitor", newProperties);
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.addLast(propertySource);
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return "true".equalsIgnoreCase(otelSdkDisabled);
}
/**
 * Detects the Application Insights Java agent by probing for its Agent class.
 * The null ClassLoader targets the bootstrap class loader (per review discussion, the
 * agent's class is expected there when attached), and initialize=false avoids running
 * static initializers during the probe.
 */
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
// Absent from the bootstrap loader => agent is not attached.
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} | class PropertiesPostProcessor implements EnvironmentPostProcessor, Ordered {
private static final Logger LOG = LoggerFactory.getLogger(PropertiesPostProcessor.class);
@Override
private static Map<String, Object> buildNewProperties(ConfigurableEnvironment environment) {
if (starterHasToBeDisabled(environment)) {
return Collections.singletonMap("otel.sdk.disabled", true);
}
return Collections.singletonMap("otel.exporter.otlp.enabled", false);
}
private static boolean starterHasToBeDisabled(ConfigurableEnvironment environment) {
return !isNativeRuntimeExecution()
&& !isStarterDisabled(environment) && applicationInsightsAgentIsAttached();
}
private static boolean isNativeRuntimeExecution() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static boolean isStarterDisabled(ConfigurableEnvironment environment) {
String otelSdkDisabled = environment.getProperty("otel.sdk.disabled", "false");
return Boolean.parseBoolean(otelSdkDisabled);
}
private static boolean applicationInsightsAgentIsAttached() {
try {
Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 1;
}
} |
Question: is `1` used because this subscriber is effectively synchronous, so there will only ever be one outstanding request to the upstream Publisher? | public void onSubscribe(Subscription s) {
if (Operators.validate(subscription, s)) {
subscription = s;
s.request(1);
}
} | s.request(1); | public void onSubscribe(Subscription s) {
if (Operators.validate(subscription, s)) {
subscription = s;
s.request(1);
}
} | class VertxRequestWriteSubscriber implements Subscriber<ByteBuffer> {
private static final ClientLogger LOGGER = new ClientLogger(VertxRequestWriteSubscriber.class);
private final HttpClientRequest request;
private final MonoSink<HttpResponse> emitter;
private final ProgressReporter progressReporter;
private volatile Subscription subscription;
private volatile State state = State.UNINITIALIZED;
private volatile Throwable error;
/**
 * Wires this subscriber into the Vert.x request: transport failures are routed through
 * onError, and the drain handler resumes demand (requestNext) once the write queue empties.
 */
public VertxRequestWriteSubscriber(HttpClientRequest request, MonoSink<HttpResponse> emitter,
ProgressReporter progressReporter) {
this.request = request.exceptionHandler(this::onError)
.drainHandler(ignored -> requestNext());
this.emitter = emitter;
// May be null; progress is only reported when a reporter was supplied.
this.progressReporter = progressReporter;
}
/**
 * Accepts the next buffer from upstream and writes it to the request. Exactly one write
 * may be in flight at a time; a buffer arriving while already WRITING violates the
 * one-element-per-request contract and fails the stream.
 * Fix: removed the duplicated {@code @Override} annotation — {@code @Override} is not a
 * repeatable annotation, so the duplicate does not compile.
 */
@Override
public void onNext(ByteBuffer bytes) {
try {
if (state == State.WRITING) {
onErrorInternal(new IllegalStateException("Received onNext while processing another write operation."));
} else {
state = State.WRITING;
write(bytes);
}
} catch (Exception ex) {
// Surface any synchronous failure from the write path through the error flow.
onErrorInternal(ex);
}
}
/**
 * Asynchronously writes one buffer to the Vert.x request and, in the completion callback,
 * advances the state machine: request more data, finish the request, or propagate an error.
 */
// Deprecation suppressed for the callback-style HttpClientRequest.write overload used below.
@SuppressWarnings("deprecation")
private void write(ByteBuffer bytes) {
// Captured before the write consumes the buffer, for progress reporting.
int remaining = bytes.remaining();
request.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes)), result -> {
// Snapshot: the state may have moved to COMPLETE/ERROR while the write was in flight.
State state = this.state;
if (state == State.WRITING) {
this.state = State.UNINITIALIZED;
}
if (result.succeeded()) {
if (progressReporter != null) {
progressReporter.reportProgress(remaining);
}
if (state == State.WRITING) {
// Still streaming: pull the next buffer unless Vert.x signalled back-pressure;
// in that case the drain handler will call requestNext() later.
if (!request.writeQueueFull()) {
requestNext();
}
} else if (state == State.COMPLETE) {
// Upstream completed while this write was in flight — finish the request now.
endRequest();
} else if (state == State.ERROR) {
// Upstream errored while this write was in flight — propagate the stashed error.
resetRequest(error);
}
} else {
this.state = State.ERROR;
resetRequest(result.cause());
}
});
}
/**
 * Requests one more buffer from upstream — but only while idle
 * (no write in flight, not completed, not errored).
 */
private void requestNext() {
if (state != State.UNINITIALIZED) {
return;
}
subscription.request(1);
}
// Reactive Streams error signal; funneled into the shared internal error handling.
@Override
public void onError(Throwable throwable) {
onErrorInternal(throwable);
}
/**
 * Moves the state machine to ERROR. If the stream is already terminal (code >= 2) the
 * error has nowhere to go and is handed to Reactor's dropped-error hook. While a write
 * is in flight the error is stashed so the write callback can reset the request.
 */
private void onErrorInternal(Throwable throwable) {
State state = this.state;
if (state.code >= 2) {
// Already COMPLETE or ERROR: report via Reactor instead of silently losing it.
Operators.onErrorDropped(throwable, Context.of(emitter.contextView()));
}
this.state = State.ERROR;
if (state != State.WRITING) {
resetRequest(throwable);
} else {
// Deferred: the in-flight write's callback will call resetRequest(error).
error = throwable;
}
}
// Failure path: stop upstream demand, fail the response Mono, and reset the HTTP request.
private void resetRequest(Throwable throwable) {
subscription.cancel();
emitter.error(LOGGER.logThrowableAsError(throwable));
request.reset(0, throwable);
}
/**
 * Upstream finished. Terminal states (code >= 2) ignore the signal; otherwise mark
 * COMPLETE and end the request — unless a write is still in flight, in which case the
 * write's completion callback ends it.
 */
@Override
public void onComplete() {
State state = this.state;
if (state.code >= 2) {
return;
}
this.state = State.COMPLETE;
if (state != State.WRITING) {
endRequest();
}
}
/** Finishes the HTTP request body; a failed completion is surfaced to the Mono emitter. */
private void endRequest() {
request.end(endResult -> {
if (endResult.failed()) {
emitter.error(endResult.cause());
}
});
}
// Write-pipeline state machine.
private enum State {
// Idle: no write in flight, ready to request/accept the next buffer.
UNINITIALIZED(0),
// A write is currently being flushed to the Vert.x request.
WRITING(1),
// Upstream signaled completion; terminal.
COMPLETE(2),
// A failure occurred; terminal.
ERROR(3);
// Numeric rank; values >= 2 mark terminal states (checked in onComplete/onErrorInternal).
private final int code;
State(int code) {
this.code = code;
}
}
} | class VertxRequestWriteSubscriber implements Subscriber<ByteBuffer> {
private static final ClientLogger LOGGER = new ClientLogger(VertxRequestWriteSubscriber.class);
private final HttpClientRequest request;
private final MonoSink<HttpResponse> emitter;
private final ProgressReporter progressReporter;
private volatile Subscription subscription;
private volatile State state = State.UNINITIALIZED;
private volatile Throwable error;
public VertxRequestWriteSubscriber(HttpClientRequest request, MonoSink<HttpResponse> emitter,
ProgressReporter progressReporter) {
this.request = request.exceptionHandler(this::onError)
.drainHandler(ignored -> requestNext());
this.emitter = emitter;
this.progressReporter = progressReporter;
}
@Override
@Override
public void onNext(ByteBuffer bytes) {
try {
if (state == State.WRITING) {
onErrorInternal(new IllegalStateException("Received onNext while processing another write operation."));
} else {
state = State.WRITING;
write(bytes);
}
} catch (Exception ex) {
onErrorInternal(ex);
}
}
@SuppressWarnings("deprecation")
private void write(ByteBuffer bytes) {
int remaining = bytes.remaining();
request.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes)), result -> {
State state = this.state;
if (state == State.WRITING) {
this.state = State.UNINITIALIZED;
}
if (result.succeeded()) {
if (progressReporter != null) {
progressReporter.reportProgress(remaining);
}
if (state == State.WRITING) {
if (!request.writeQueueFull()) {
requestNext();
}
} else if (state == State.COMPLETE) {
endRequest();
} else if (state == State.ERROR) {
resetRequest(error);
}
} else {
this.state = State.ERROR;
resetRequest(result.cause());
}
});
}
private void requestNext() {
if (state == State.UNINITIALIZED) {
subscription.request(1);
}
}
@Override
public void onError(Throwable throwable) {
onErrorInternal(throwable);
}
private void onErrorInternal(Throwable throwable) {
State state = this.state;
if (state.code >= 2) {
Operators.onErrorDropped(throwable, Context.of(emitter.contextView()));
}
this.state = State.ERROR;
if (state != State.WRITING) {
resetRequest(throwable);
} else {
error = throwable;
}
}
private void resetRequest(Throwable throwable) {
subscription.cancel();
emitter.error(LOGGER.logThrowableAsError(throwable));
request.reset(0, throwable);
}
@Override
public void onComplete() {
State state = this.state;
if (state.code >= 2) {
return;
}
this.state = State.COMPLETE;
if (state != State.WRITING) {
endRequest();
}
}
private void endRequest() {
request.end(result -> {
if (result.failed()) {
emitter.error(result.cause());
}
});
}
private enum State {
UNINITIALIZED(0),
WRITING(1),
COMPLETE(2),
ERROR(3);
private final int code;
State(int code) {
this.code = code;
}
}
} |
NIT - worth adding a Math.max(MINIMUM_BACKOFF_TIME_IN_MS, ...) as a safety net - I assume the code above is now safe from numeric overflow - but IMO the Math.max would make that assumption explicit | public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
Duration backoffTime;
Duration timeout;
if (!(exception instanceof RetryWithException)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
RetryWithException lastRetryWithException = (RetryWithException)exception;
GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
long remainingMilliseconds =
(this.waitTimeInSeconds * 1_000L) -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
if (remainingMilliseconds <= 0) {
logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
lastRetryWithException);
return Mono.just(ShouldRetryResult.error(lastRetryWithException));
}
backoffTime = Duration.ofMillis(
Math.min(
Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
this.currentBackoffMilliseconds.set(
Math.min(
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
this.currentBackoffMilliseconds.get() * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER)
);
logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
logger.debug("Received RetryWithException, will retry, ", exception);
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
} | Math.min( | public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
return this.retryWithRetryPolicy.shouldRetry(exception)
.flatMap((retryWithResult) -> {
if (!retryWithResult.nonRelatedException) {
return Mono.just(retryWithResult);
}
return this.goneRetryPolicy.shouldRetry(exception)
.flatMap((goneRetryResult) -> {
if (!goneRetryResult.shouldRetry) {
logger.debug("Operation will NOT be retried. Exception:",
exception);
this.end = Instant.now();
}
return Mono.just(goneRetryResult);
});
});
} | class GoneAndRetryWithRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class);
private final GoneRetryPolicy goneRetryPolicy;
private final RetryWithRetryPolicy retryWithRetryPolicy;
private final Instant start;
private volatile Instant end;
private volatile RetryWithException lastRetryWithException;
private RetryContext retryContext;
private static final ThreadLocalRandom random = ThreadLocalRandom.current();
/**
 * Composite policy: RetryWith failures are handled by RetryWithRetryPolicy and everything
 * else falls through to GoneRetryPolicy; both share one retry context and one start time.
 */
public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) {
this.retryContext = BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics);
this.goneRetryPolicy = new GoneRetryPolicy(
request,
waitTimeInSeconds,
this.retryContext
);
this.retryWithRetryPolicy = new RetryWithRetryPolicy(waitTimeInSeconds, this.retryContext);
// Both sub-policies measure their wait budget from this shared start instant.
this.start = Instant.now();
}
/**
 * Returns the retry context shared with the request's diagnostics.
 * Fix: removed the duplicated {@code @Override} annotation — {@code @Override} is not a
 * repeatable annotation, so the duplicate does not compile.
 */
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
/**
 * Time elapsed since this policy was created — measured against the recorded end
 * instant when retrying has finished, otherwise against "now".
 */
private Duration getElapsedTime() {
Instant effectiveEnd = this.end;
if (effectiveEnd == null) {
effectiveEnd = Instant.now();
}
return Duration.between(this.start, effectiveEnd);
}
class GoneRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15;
private final static int INITIAL_BACKOFF_TIME = 1;
private final static int BACK_OFF_MULTIPLIER = 2;
private final RxDocumentServiceRequest request;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger attemptCountInvalidPartition = new AtomicInteger(1);
private final AtomicInteger currentBackoffSeconds = new AtomicInteger(GoneRetryPolicy.INITIAL_BACKOFF_TIME);
private final int waitTimeInSeconds;
private RetryContext retryContext;
/**
 * @param request           the request being retried; must not be null
 * @param waitTimeInSeconds overall retry budget; null falls back to the 30s default
 * @param retryContext      diagnostics retry context shared with the outer policy
 */
public GoneRetryPolicy(
RxDocumentServiceRequest request,
Integer waitTimeInSeconds,
RetryContext retryContext) {
checkNotNull(request, "request must not be null.");
this.request = request;
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
/**
 * Decides whether this policy should ignore the exception. Gone / migration / split
 * exceptions are always retry candidates; an InvalidPartitionException is only
 * non-retryable when the request is pinned to a partition key range of a known collection.
 */
private boolean isNonRetryableException(Exception exception) {
boolean alwaysRetryable = exception instanceof GoneException
|| exception instanceof PartitionIsMigratingException
|| exception instanceof PartitionKeyRangeIsSplittingException;
if (alwaysRetryable) {
return false;
}
if (exception instanceof InvalidPartitionException) {
return this.request.getPartitionKeyRangeIdentity() != null &&
this.request.getPartitionKeyRangeIdentity().getCollectionRid() != null;
}
return true;
}
/**
 * Invoked when the retry budget is exhausted: logs the terminal failure and picks the
 * exception to surface — the last observed RetryWithException if any occurred during the
 * retries, otherwise a ServiceUnavailable wrapper (or the CosmosException itself).
 */
private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) {
String exceptionType;
if (exception instanceof GoneException) {
exceptionType = "GoneException";
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionType = "PartitionKeyRangeGoneException";
} else if (exception instanceof InvalidPartitionException) {
exceptionType = "InvalidPartitionException";
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionType = "PartitionKeyRangeIsSplittingException";
} else if (exception instanceof CosmosException) {
// Other CosmosExceptions are surfaced as-is, without wrapping.
logger.warn("Received CosmosException after backoff/retry. Will fail the request.",
exception);
return (CosmosException)exception;
} else {
// Callers filter exceptions first, so any other type indicates a programming error.
throw new IllegalStateException("Invalid exception type", exception);
}
// Prefer reporting the last RetryWithException seen by the sibling policy, if any.
RetryWithException lastRetryWithExceptionSnapshot =
GoneAndRetryWithRetryPolicy.this.lastRetryWithException;
if (lastRetryWithExceptionSnapshot != null) {
logger.warn(
"Received {} after backoff/retry including at least one RetryWithException. "
+ "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}",
exceptionType,
exceptionType,
exception,
lastRetryWithExceptionSnapshot);
return lastRetryWithExceptionSnapshot;
}
logger.warn(
"Received {} after backoff/retry. Will fail the request. {}",
exceptionType,
exception);
int subStatusCode = getExceptionSubStatusCodeForGoneRetryPolicy(exception);
return BridgeInternal.createServiceUnavailableException(exception, subStatusCode);
}
/**
 * Maps a retry-exhaustion cause to the sub-status code used when wrapping it in a
 * ServiceUnavailable exception; non-Cosmos failures yield UNKNOWN.
 */
private int getExceptionSubStatusCodeForGoneRetryPolicy(Exception exception) {
if (!(exception instanceof CosmosException)) {
return HttpConstants.SubStatusCodes.UNKNOWN;
}
if (exception instanceof PartitionKeyRangeIsSplittingException) {
return HttpConstants.SubStatusCodes.COMPLETING_SPLIT_EXCEEDED_RETRY_LIMIT;
}
if (exception instanceof PartitionIsMigratingException) {
return HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION_EXCEEDED_RETRY_LIMIT;
}
if (exception instanceof InvalidPartitionException) {
return HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT;
}
if (exception instanceof PartitionKeyRangeGoneException) {
return HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE_EXCEEDED_RETRY_LIMIT;
}
// Any other CosmosException keeps its own sub-status code.
return ((CosmosException) exception).getSubStatusCode();
}
/**
 * Decides whether a Gone-style failure should be retried: rejects unrelated exceptions and
 * unsafe (non-idempotent) write retries, applies exponential backoff (x2, capped at 15s)
 * within the overall wait budget, then dispatches per-exception side effects via handleException.
 */
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
CosmosException exceptionToThrow;
Duration backoffTime = Duration.ofSeconds(0);
Duration timeout;
boolean forceRefreshAddressCache;
if (isNonRetryableException(exception)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} else if (exception instanceof GoneException &&
!request.isReadOnly() &&
BridgeInternal.hasSendingRequestStarted((CosmosException)exception) &&
!((GoneException)exception).isBasedOn410ResponseFromService() &&
!this.request.getNonIdempotentWriteRetriesEnabled()) {
// A write whose transmission already started cannot be retried safely: it may have
// been applied by the service, and retries are not idempotent.
logger.warn(
"Operation will NOT be retried. Write operations which failed due to transient transport errors " +
"can not be retried safely when sending the request " +
"to the service because they aren't idempotent. Current attempt {}, Exception: ",
this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetry(
Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount.get())));
}
// Budget remaining from the overall wait time, measured against the shared start instant.
long remainingSeconds = this.waitTimeInSeconds -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis() / 1_000L;
int currentRetryAttemptCount = this.attemptCount.get();
// The first attempt retries immediately; backoff only applies from the second attempt on.
if (this.attemptCount.getAndIncrement() > 1) {
if (remainingSeconds <= 0) {
exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception);
return Mono.just(ShouldRetryResult.error(exceptionToThrow));
}
// Backoff clamped by both the remaining budget and the 15s hard cap.
backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds.get(), remainingSeconds),
GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS));
this.currentBackoffSeconds.accumulateAndGet(GoneRetryPolicy.BACK_OFF_MULTIPLIER, (left, right) -> left * right);
logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds());
}
// Per-attempt timeout = whatever budget is left after the backoff.
long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS);
logger.debug("Timeout. {} - BackoffTime {} - currentBackoffSeconds {} - CurrentRetryAttemptCount {}",
timeout.toMillis(),
backoffTime,
this.currentBackoffSeconds,
currentRetryAttemptCount);
// Apply per-exception side effects (cache refresh flags etc.); a non-null result short-circuits.
Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception);
Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft();
if (result != null) {
return result;
}
forceRefreshAddressCache = exceptionHandlingResult.getRight();
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount)));
}
// Retry context shared with the outer GoneAndRetryWithRetryPolicy.
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
/**
 * Dispatches to the dedicated handler for each retryable exception type. Returns a pair of
 * (short-circuit result or null, force-address-cache-refresh flag).
 */
private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) {
if (exception instanceof GoneException) {
return handleGoneException((GoneException) exception);
}
if (exception instanceof PartitionIsMigratingException) {
return handlePartitionIsMigratingException((PartitionIsMigratingException) exception);
}
if (exception instanceof InvalidPartitionException) {
return handleInvalidPartitionException((InvalidPartitionException) exception);
}
if (exception instanceof PartitionKeyRangeIsSplittingException) {
return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception);
}
// shouldRetry filters exception types before calling us, so this is unreachable by contract.
throw new IllegalStateException("Invalid exception type", exception);
}
// null result => caller proceeds with the retry; 'true' forces an address cache refresh.
private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) {
logger.debug("Received gone exception, will retry, {}", exception.toString());
return Pair.of(null, true);
}
// Partition migration: refresh the collection routing map and the address cache, then retry.
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) {
logger.debug("Received PartitionIsMigratingException, will retry, {}", exception.toString());
this.request.forceCollectionRoutingMapRefresh = true;
return Pair.of(null, true);
}
// Range split: drop the stale resolved range and quorum state, force a range refresh, retry
// without refreshing the address cache ('false').
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) {
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.quorumSelectedStoreResponse = null;
logger.debug("Received partition key range splitting exception, will retry, {}", exception.toString());
this.request.forcePartitionKeyRangeRefresh = true;
return Pair.of(null, false);
}
/**
 * Stale name cache: clears the resolved-range/quorum state and retries with a forced name
 * cache refresh. At most two such retries are allowed; afterwards the request fails as
 * ServiceUnavailable with the NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT sub-status.
 */
private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) {
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedStoreResponse = null;
this.request.requestContext.globalCommittedSelectedLSN = -1;
if (this.attemptCountInvalidPartition.getAndIncrement() > 2) {
logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}",
exception.toString());
return Pair.of(
Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT))),
false);
}
logger.debug("Received invalid collection exception, will retry, {}", exception.toString());
this.request.forceNameCacheRefresh = true;
return Pair.of(null, false);
}
}
class RetryWithRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 1000;
private final static int INITIAL_BACKOFF_TIME_MS = 10;
private final static int BACK_OFF_MULTIPLIER = 2;
private final static int RANDOM_SALT_IN_MS = 5;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger currentBackoffMilliseconds = new AtomicInteger(RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS);
private final int waitTimeInSeconds;
private final RetryContext retryContext;
/**
 * @param waitTimeInSeconds overall retry budget; null falls back to the 30s default
 * @param retryContext      diagnostics retry context shared with the outer policy
 */
public RetryWithRetryPolicy(Integer waitTimeInSeconds, RetryContext retryContext) {
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
/**
 * Decides whether a request failing with RetryWithException should be retried. Backoff
 * grows exponentially (x2) from 10ms with a small random jitter, clamped by both the
 * remaining overall wait budget and the 1s hard cap.
 * Fixes: (1) the final debug message had a dangling ", " with no matching placeholder;
 * (2) the backoff advance used a non-atomic get()+set() pair — replaced with an atomic
 * updateAndGet, matching GoneRetryPolicy's accumulateAndGet style.
 */
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
Duration backoffTime;
Duration timeout;
// This policy only handles RetryWithException; everything else is reported as unrelated.
if (!(exception instanceof RetryWithException)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
RetryWithException lastRetryWithException = (RetryWithException)exception;
// Remember the last RetryWithException so the Gone policy can surface it on exhaustion.
GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
long remainingMilliseconds =
(this.waitTimeInSeconds * 1_000L) -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
if (remainingMilliseconds <= 0) {
// Overall wait budget exhausted — fail with the original exception.
logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
lastRetryWithException);
return Mono.just(ShouldRetryResult.error(lastRetryWithException));
}
// Backoff = current exponential value + jitter, clamped to the remaining budget and the cap.
backoffTime = Duration.ofMillis(
Math.min(
Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
// Double the backoff for the next attempt, never exceeding the cap (also bounds overflow).
this.currentBackoffMilliseconds.updateAndGet(
previousBackoffMillis -> Math.min(
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
previousBackoffMillis * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER));
logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
// Per-attempt timeout = remaining budget after the backoff.
long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
logger.debug("Received RetryWithException, will retry", exception);
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
}
// Retry context shared with the outer GoneAndRetryWithRetryPolicy.
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
}
} | class GoneAndRetryWithRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class);
private final GoneRetryPolicy goneRetryPolicy;
private final RetryWithRetryPolicy retryWithRetryPolicy;
private final Instant start;
private volatile Instant end;
private volatile RetryWithException lastRetryWithException;
private RetryContext retryContext;
private static final ThreadLocalRandom random = ThreadLocalRandom.current();
public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) {
this.retryContext = BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics);
this.goneRetryPolicy = new GoneRetryPolicy(
request,
waitTimeInSeconds,
this.retryContext
);
this.retryWithRetryPolicy = new RetryWithRetryPolicy(waitTimeInSeconds, this.retryContext);
this.start = Instant.now();
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private Duration getElapsedTime() {
Instant endSnapshot = this.end != null ? this.end : Instant.now();
return Duration.between(this.start, endSnapshot);
}
class GoneRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15;
private final static int INITIAL_BACKOFF_TIME = 1;
private final static int BACK_OFF_MULTIPLIER = 2;
private final RxDocumentServiceRequest request;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger attemptCountInvalidPartition = new AtomicInteger(1);
private final AtomicInteger currentBackoffSeconds = new AtomicInteger(GoneRetryPolicy.INITIAL_BACKOFF_TIME);
private final int waitTimeInSeconds;
private RetryContext retryContext;
public GoneRetryPolicy(
RxDocumentServiceRequest request,
Integer waitTimeInSeconds,
RetryContext retryContext) {
checkNotNull(request, "request must not be null.");
this.request = request;
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
private boolean isNonRetryableException(Exception exception) {
if (exception instanceof GoneException ||
exception instanceof PartitionIsMigratingException ||
exception instanceof PartitionKeyRangeIsSplittingException) {
return false;
}
if (exception instanceof InvalidPartitionException) {
return this.request.getPartitionKeyRangeIdentity() != null &&
this.request.getPartitionKeyRangeIdentity().getCollectionRid() != null;
}
return true;
}
private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) {
String exceptionType;
if (exception instanceof GoneException) {
exceptionType = "GoneException";
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionType = "PartitionKeyRangeGoneException";
} else if (exception instanceof InvalidPartitionException) {
exceptionType = "InvalidPartitionException";
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionType = "PartitionKeyRangeIsSplittingException";
} else if (exception instanceof CosmosException) {
logger.warn("Received CosmosException after backoff/retry. Will fail the request.",
exception);
return (CosmosException)exception;
} else {
throw new IllegalStateException("Invalid exception type", exception);
}
RetryWithException lastRetryWithExceptionSnapshot =
GoneAndRetryWithRetryPolicy.this.lastRetryWithException;
if (lastRetryWithExceptionSnapshot != null) {
logger.warn(
"Received {} after backoff/retry including at least one RetryWithException. "
+ "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}",
exceptionType,
exceptionType,
exception,
lastRetryWithExceptionSnapshot);
return lastRetryWithExceptionSnapshot;
}
logger.warn(
"Received {} after backoff/retry. Will fail the request. {}",
exceptionType,
exception);
int subStatusCode = getExceptionSubStatusCodeForGoneRetryPolicy(exception);
return BridgeInternal.createServiceUnavailableException(exception, subStatusCode);
}
private int getExceptionSubStatusCodeForGoneRetryPolicy(Exception exception) {
int exceptionSubStatusCode = HttpConstants.SubStatusCodes.UNKNOWN;
if (exception instanceof CosmosException) {
if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_SPLIT_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionIsMigratingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof InvalidPartitionException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE_EXCEEDED_RETRY_LIMIT;
} else {
exceptionSubStatusCode = ((CosmosException) exception).getSubStatusCode();
}
}
return exceptionSubStatusCode;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
CosmosException exceptionToThrow;
Duration backoffTime = Duration.ofSeconds(0);
Duration timeout;
boolean forceRefreshAddressCache;
if (isNonRetryableException(exception)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} else if (exception instanceof GoneException &&
!request.isReadOnly() &&
BridgeInternal.hasSendingRequestStarted((CosmosException)exception) &&
!((GoneException)exception).isBasedOn410ResponseFromService() &&
!this.request.getNonIdempotentWriteRetriesEnabled()) {
logger.warn(
"Operation will NOT be retried. Write operations which failed due to transient transport errors " +
"can not be retried safely when sending the request " +
"to the service because they aren't idempotent. Current attempt {}, Exception: ",
this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetry(
Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount.get())));
}
long remainingSeconds = this.waitTimeInSeconds -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis() / 1_000L;
int currentRetryAttemptCount = this.attemptCount.get();
if (this.attemptCount.getAndIncrement() > 1) {
if (remainingSeconds <= 0) {
exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception);
return Mono.just(ShouldRetryResult.error(exceptionToThrow));
}
backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds.get(), remainingSeconds),
GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS));
this.currentBackoffSeconds.accumulateAndGet(GoneRetryPolicy.BACK_OFF_MULTIPLIER, (left, right) -> left * right);
logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds());
}
long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS);
logger.debug("Timeout. {} - BackoffTime {} - currentBackoffSeconds {} - CurrentRetryAttemptCount {}",
timeout.toMillis(),
backoffTime,
this.currentBackoffSeconds,
currentRetryAttemptCount);
Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception);
Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft();
if (result != null) {
return result;
}
forceRefreshAddressCache = exceptionHandlingResult.getRight();
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount)));
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) {
if (exception instanceof GoneException) {
return handleGoneException((GoneException)exception);
} else if (exception instanceof PartitionIsMigratingException) {
return handlePartitionIsMigratingException((PartitionIsMigratingException)exception);
} else if (exception instanceof InvalidPartitionException) {
return handleInvalidPartitionException((InvalidPartitionException)exception);
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception);
}
throw new IllegalStateException("Invalid exception type", exception);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) {
logger.debug("Received gone exception, will retry, {}", exception.toString());
return Pair.of(null, true);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) {
logger.debug("Received PartitionIsMigratingException, will retry, {}", exception.toString());
this.request.forceCollectionRoutingMapRefresh = true;
return Pair.of(null, true);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) {
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.quorumSelectedStoreResponse = null;
logger.debug("Received partition key range splitting exception, will retry, {}", exception.toString());
this.request.forcePartitionKeyRangeRefresh = true;
return Pair.of(null, false);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) {
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedStoreResponse = null;
this.request.requestContext.globalCommittedSelectedLSN = -1;
if (this.attemptCountInvalidPartition.getAndIncrement() > 2) {
logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}",
exception.toString());
return Pair.of(
Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT))),
false);
}
logger.debug("Received invalid collection exception, will retry, {}", exception.toString());
this.request.forceNameCacheRefresh = true;
return Pair.of(null, false);
}
}
class RetryWithRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 1000;
private final static int INITIAL_BACKOFF_TIME_MS = 10;
private final static int BACK_OFF_MULTIPLIER = 2;
private final static int RANDOM_SALT_IN_MS = 5;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger currentBackoffMilliseconds = new AtomicInteger(RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS);
private final int waitTimeInSeconds;
private final RetryContext retryContext;
public RetryWithRetryPolicy(Integer waitTimeInSeconds, RetryContext retryContext) {
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
Duration backoffTime;
Duration timeout;
if (!(exception instanceof RetryWithException)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
RetryWithException lastRetryWithException = (RetryWithException)exception;
GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
long remainingMilliseconds =
(this.waitTimeInSeconds * 1_000L) -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
if (remainingMilliseconds <= 0) {
logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
lastRetryWithException);
return Mono.just(ShouldRetryResult.error(lastRetryWithException));
}
backoffTime = Duration.ofMillis(
Math.min(
Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
this.currentBackoffMilliseconds.set(
Math.max(
RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS,
Math.min(
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
this.currentBackoffMilliseconds.get() * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER))
);
logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
logger.debug("Received RetryWithException, will retry, ", exception);
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
}
} |
added | public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
Duration backoffTime;
Duration timeout;
if (!(exception instanceof RetryWithException)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
RetryWithException lastRetryWithException = (RetryWithException)exception;
GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
long remainingMilliseconds =
(this.waitTimeInSeconds * 1_000L) -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
if (remainingMilliseconds <= 0) {
logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
lastRetryWithException);
return Mono.just(ShouldRetryResult.error(lastRetryWithException));
}
backoffTime = Duration.ofMillis(
Math.min(
Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
this.currentBackoffMilliseconds.set(
Math.min(
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
this.currentBackoffMilliseconds.get() * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER)
);
logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
logger.debug("Received RetryWithException, will retry, ", exception);
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
} | Math.min( | public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
return this.retryWithRetryPolicy.shouldRetry(exception)
.flatMap((retryWithResult) -> {
if (!retryWithResult.nonRelatedException) {
return Mono.just(retryWithResult);
}
return this.goneRetryPolicy.shouldRetry(exception)
.flatMap((goneRetryResult) -> {
if (!goneRetryResult.shouldRetry) {
logger.debug("Operation will NOT be retried. Exception:",
exception);
this.end = Instant.now();
}
return Mono.just(goneRetryResult);
});
});
} | class GoneAndRetryWithRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class);
private final GoneRetryPolicy goneRetryPolicy;
private final RetryWithRetryPolicy retryWithRetryPolicy;
private final Instant start;
private volatile Instant end;
private volatile RetryWithException lastRetryWithException;
private RetryContext retryContext;
private static final ThreadLocalRandom random = ThreadLocalRandom.current();
public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) {
this.retryContext = BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics);
this.goneRetryPolicy = new GoneRetryPolicy(
request,
waitTimeInSeconds,
this.retryContext
);
this.retryWithRetryPolicy = new RetryWithRetryPolicy(waitTimeInSeconds, this.retryContext);
this.start = Instant.now();
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private Duration getElapsedTime() {
Instant endSnapshot = this.end != null ? this.end : Instant.now();
return Duration.between(this.start, endSnapshot);
}
class GoneRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15;
private final static int INITIAL_BACKOFF_TIME = 1;
private final static int BACK_OFF_MULTIPLIER = 2;
private final RxDocumentServiceRequest request;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger attemptCountInvalidPartition = new AtomicInteger(1);
private final AtomicInteger currentBackoffSeconds = new AtomicInteger(GoneRetryPolicy.INITIAL_BACKOFF_TIME);
private final int waitTimeInSeconds;
private RetryContext retryContext;
public GoneRetryPolicy(
RxDocumentServiceRequest request,
Integer waitTimeInSeconds,
RetryContext retryContext) {
checkNotNull(request, "request must not be null.");
this.request = request;
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
private boolean isNonRetryableException(Exception exception) {
if (exception instanceof GoneException ||
exception instanceof PartitionIsMigratingException ||
exception instanceof PartitionKeyRangeIsSplittingException) {
return false;
}
if (exception instanceof InvalidPartitionException) {
return this.request.getPartitionKeyRangeIdentity() != null &&
this.request.getPartitionKeyRangeIdentity().getCollectionRid() != null;
}
return true;
}
private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) {
String exceptionType;
if (exception instanceof GoneException) {
exceptionType = "GoneException";
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionType = "PartitionKeyRangeGoneException";
} else if (exception instanceof InvalidPartitionException) {
exceptionType = "InvalidPartitionException";
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionType = "PartitionKeyRangeIsSplittingException";
} else if (exception instanceof CosmosException) {
logger.warn("Received CosmosException after backoff/retry. Will fail the request.",
exception);
return (CosmosException)exception;
} else {
throw new IllegalStateException("Invalid exception type", exception);
}
RetryWithException lastRetryWithExceptionSnapshot =
GoneAndRetryWithRetryPolicy.this.lastRetryWithException;
if (lastRetryWithExceptionSnapshot != null) {
logger.warn(
"Received {} after backoff/retry including at least one RetryWithException. "
+ "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}",
exceptionType,
exceptionType,
exception,
lastRetryWithExceptionSnapshot);
return lastRetryWithExceptionSnapshot;
}
logger.warn(
"Received {} after backoff/retry. Will fail the request. {}",
exceptionType,
exception);
int subStatusCode = getExceptionSubStatusCodeForGoneRetryPolicy(exception);
return BridgeInternal.createServiceUnavailableException(exception, subStatusCode);
}
private int getExceptionSubStatusCodeForGoneRetryPolicy(Exception exception) {
int exceptionSubStatusCode = HttpConstants.SubStatusCodes.UNKNOWN;
if (exception instanceof CosmosException) {
if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_SPLIT_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionIsMigratingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof InvalidPartitionException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE_EXCEEDED_RETRY_LIMIT;
} else {
exceptionSubStatusCode = ((CosmosException) exception).getSubStatusCode();
}
}
return exceptionSubStatusCode;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
CosmosException exceptionToThrow;
Duration backoffTime = Duration.ofSeconds(0);
Duration timeout;
boolean forceRefreshAddressCache;
if (isNonRetryableException(exception)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} else if (exception instanceof GoneException &&
!request.isReadOnly() &&
BridgeInternal.hasSendingRequestStarted((CosmosException)exception) &&
!((GoneException)exception).isBasedOn410ResponseFromService() &&
!this.request.getNonIdempotentWriteRetriesEnabled()) {
logger.warn(
"Operation will NOT be retried. Write operations which failed due to transient transport errors " +
"can not be retried safely when sending the request " +
"to the service because they aren't idempotent. Current attempt {}, Exception: ",
this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetry(
Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount.get())));
}
long remainingSeconds = this.waitTimeInSeconds -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis() / 1_000L;
int currentRetryAttemptCount = this.attemptCount.get();
if (this.attemptCount.getAndIncrement() > 1) {
if (remainingSeconds <= 0) {
exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception);
return Mono.just(ShouldRetryResult.error(exceptionToThrow));
}
backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds.get(), remainingSeconds),
GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS));
this.currentBackoffSeconds.accumulateAndGet(GoneRetryPolicy.BACK_OFF_MULTIPLIER, (left, right) -> left * right);
logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds());
}
long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS);
logger.debug("Timeout. {} - BackoffTime {} - currentBackoffSeconds {} - CurrentRetryAttemptCount {}",
timeout.toMillis(),
backoffTime,
this.currentBackoffSeconds,
currentRetryAttemptCount);
Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception);
Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft();
if (result != null) {
return result;
}
forceRefreshAddressCache = exceptionHandlingResult.getRight();
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount)));
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) {
if (exception instanceof GoneException) {
return handleGoneException((GoneException)exception);
} else if (exception instanceof PartitionIsMigratingException) {
return handlePartitionIsMigratingException((PartitionIsMigratingException)exception);
} else if (exception instanceof InvalidPartitionException) {
return handleInvalidPartitionException((InvalidPartitionException)exception);
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception);
}
throw new IllegalStateException("Invalid exception type", exception);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) {
logger.debug("Received gone exception, will retry, {}", exception.toString());
return Pair.of(null, true);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) {
logger.debug("Received PartitionIsMigratingException, will retry, {}", exception.toString());
this.request.forceCollectionRoutingMapRefresh = true;
return Pair.of(null, true);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) {
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.quorumSelectedStoreResponse = null;
logger.debug("Received partition key range splitting exception, will retry, {}", exception.toString());
this.request.forcePartitionKeyRangeRefresh = true;
return Pair.of(null, false);
}
private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) {
this.request.requestContext.quorumSelectedLSN = -1;
this.request.requestContext.resolvedPartitionKeyRange = null;
this.request.requestContext.quorumSelectedStoreResponse = null;
this.request.requestContext.globalCommittedSelectedLSN = -1;
if (this.attemptCountInvalidPartition.getAndIncrement() > 2) {
logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}",
exception.toString());
return Pair.of(
Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT))),
false);
}
logger.debug("Received invalid collection exception, will retry, {}", exception.toString());
this.request.forceNameCacheRefresh = true;
return Pair.of(null, false);
}
}
class RetryWithRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 1000;
private final static int INITIAL_BACKOFF_TIME_MS = 10;
private final static int BACK_OFF_MULTIPLIER = 2;
private final static int RANDOM_SALT_IN_MS = 5;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger currentBackoffMilliseconds = new AtomicInteger(RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS);
private final int waitTimeInSeconds;
private final RetryContext retryContext;
public RetryWithRetryPolicy(Integer waitTimeInSeconds, RetryContext retryContext) {
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
Duration backoffTime;
Duration timeout;
if (!(exception instanceof RetryWithException)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
RetryWithException lastRetryWithException = (RetryWithException)exception;
GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
long remainingMilliseconds =
(this.waitTimeInSeconds * 1_000L) -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
if (remainingMilliseconds <= 0) {
logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
lastRetryWithException);
return Mono.just(ShouldRetryResult.error(lastRetryWithException));
}
backoffTime = Duration.ofMillis(
Math.min(
Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
this.currentBackoffMilliseconds.set(
Math.min(
RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
this.currentBackoffMilliseconds.get() * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER)
);
logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
logger.debug("Received RetryWithException, will retry, ", exception);
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
}
} | class GoneAndRetryWithRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class);
private final GoneRetryPolicy goneRetryPolicy;
private final RetryWithRetryPolicy retryWithRetryPolicy;
private final Instant start;
private volatile Instant end;
private volatile RetryWithException lastRetryWithException;
private RetryContext retryContext;
private static final ThreadLocalRandom random = ThreadLocalRandom.current();
public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) {
this.retryContext = BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics);
this.goneRetryPolicy = new GoneRetryPolicy(
request,
waitTimeInSeconds,
this.retryContext
);
this.retryWithRetryPolicy = new RetryWithRetryPolicy(waitTimeInSeconds, this.retryContext);
this.start = Instant.now();
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private Duration getElapsedTime() {
Instant endSnapshot = this.end != null ? this.end : Instant.now();
return Duration.between(this.start, endSnapshot);
}
class GoneRetryPolicy implements IRetryPolicy {
private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15;
private final static int INITIAL_BACKOFF_TIME = 1;
private final static int BACK_OFF_MULTIPLIER = 2;
private final RxDocumentServiceRequest request;
private final AtomicInteger attemptCount = new AtomicInteger(1);
private final AtomicInteger attemptCountInvalidPartition = new AtomicInteger(1);
private final AtomicInteger currentBackoffSeconds = new AtomicInteger(GoneRetryPolicy.INITIAL_BACKOFF_TIME);
private final int waitTimeInSeconds;
private RetryContext retryContext;
public GoneRetryPolicy(
RxDocumentServiceRequest request,
Integer waitTimeInSeconds,
RetryContext retryContext) {
checkNotNull(request, "request must not be null.");
this.request = request;
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
this.retryContext = retryContext;
}
private boolean isNonRetryableException(Exception exception) {
if (exception instanceof GoneException ||
exception instanceof PartitionIsMigratingException ||
exception instanceof PartitionKeyRangeIsSplittingException) {
return false;
}
if (exception instanceof InvalidPartitionException) {
return this.request.getPartitionKeyRangeIdentity() != null &&
this.request.getPartitionKeyRangeIdentity().getCollectionRid() != null;
}
return true;
}
private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) {
String exceptionType;
if (exception instanceof GoneException) {
exceptionType = "GoneException";
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionType = "PartitionKeyRangeGoneException";
} else if (exception instanceof InvalidPartitionException) {
exceptionType = "InvalidPartitionException";
} else if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionType = "PartitionKeyRangeIsSplittingException";
} else if (exception instanceof CosmosException) {
logger.warn("Received CosmosException after backoff/retry. Will fail the request.",
exception);
return (CosmosException)exception;
} else {
throw new IllegalStateException("Invalid exception type", exception);
}
RetryWithException lastRetryWithExceptionSnapshot =
GoneAndRetryWithRetryPolicy.this.lastRetryWithException;
if (lastRetryWithExceptionSnapshot != null) {
logger.warn(
"Received {} after backoff/retry including at least one RetryWithException. "
+ "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}",
exceptionType,
exceptionType,
exception,
lastRetryWithExceptionSnapshot);
return lastRetryWithExceptionSnapshot;
}
logger.warn(
"Received {} after backoff/retry. Will fail the request. {}",
exceptionType,
exception);
int subStatusCode = getExceptionSubStatusCodeForGoneRetryPolicy(exception);
return BridgeInternal.createServiceUnavailableException(exception, subStatusCode);
}
private int getExceptionSubStatusCodeForGoneRetryPolicy(Exception exception) {
int exceptionSubStatusCode = HttpConstants.SubStatusCodes.UNKNOWN;
if (exception instanceof CosmosException) {
if (exception instanceof PartitionKeyRangeIsSplittingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_SPLIT_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionIsMigratingException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof InvalidPartitionException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT;
} else if (exception instanceof PartitionKeyRangeGoneException) {
exceptionSubStatusCode = HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE_EXCEEDED_RETRY_LIMIT;
} else {
exceptionSubStatusCode = ((CosmosException) exception).getSubStatusCode();
}
}
return exceptionSubStatusCode;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
CosmosException exceptionToThrow;
Duration backoffTime = Duration.ofSeconds(0);
Duration timeout;
boolean forceRefreshAddressCache;
if (isNonRetryableException(exception)) {
logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} else if (exception instanceof GoneException &&
!request.isReadOnly() &&
BridgeInternal.hasSendingRequestStarted((CosmosException)exception) &&
!((GoneException)exception).isBasedOn410ResponseFromService() &&
!this.request.getNonIdempotentWriteRetriesEnabled()) {
logger.warn(
"Operation will NOT be retried. Write operations which failed due to transient transport errors " +
"can not be retried safely when sending the request " +
"to the service because they aren't idempotent. Current attempt {}, Exception: ",
this.attemptCount,
exception);
return Mono.just(ShouldRetryResult.noRetry(
Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount.get())));
}
long remainingSeconds = this.waitTimeInSeconds -
GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis() / 1_000L;
int currentRetryAttemptCount = this.attemptCount.get();
if (this.attemptCount.getAndIncrement() > 1) {
if (remainingSeconds <= 0) {
exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception);
return Mono.just(ShouldRetryResult.error(exceptionToThrow));
}
backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds.get(), remainingSeconds),
GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS));
this.currentBackoffSeconds.accumulateAndGet(GoneRetryPolicy.BACK_OFF_MULTIPLIER, (left, right) -> left * right);
logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds());
}
long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis();
timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
: Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS);
logger.debug("Timeout. {} - BackoffTime {} - currentBackoffSeconds {} - CurrentRetryAttemptCount {}",
timeout.toMillis(),
backoffTime,
this.currentBackoffSeconds,
currentRetryAttemptCount);
Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception);
Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft();
if (result != null) {
return result;
}
forceRefreshAddressCache = exceptionHandlingResult.getRight();
return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount)));
}
/** Exposes the {@link RetryContext} tracked for this policy instance. */
@Override
public RetryContext getRetryContext() {
    return retryContext;
}
/**
 * Dispatches the failure to the handler matching its concrete type.
 * Left of the pair: a terminal retry result (or null to continue the retry flow);
 * right: whether the caller must force an address cache refresh.
 * An unrecognized type is a programming error and fails fast.
 */
private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) {
    if (exception instanceof GoneException) {
        return handleGoneException((GoneException) exception);
    }
    if (exception instanceof PartitionIsMigratingException) {
        return handlePartitionIsMigratingException((PartitionIsMigratingException) exception);
    }
    if (exception instanceof InvalidPartitionException) {
        return handleInvalidPartitionException((InvalidPartitionException) exception);
    }
    if (exception instanceof PartitionKeyRangeIsSplittingException) {
        return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception);
    }
    throw new IllegalStateException("Invalid exception type", exception);
}
// GoneException: the targeted replica/endpoint is gone. Null left value means
// "no terminal result — keep retrying"; right=true makes the caller force an
// address cache refresh (see the forceRefreshAddressCache assignment in shouldRetry).
private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) {
    logger.debug("Received gone exception, will retry, {}", exception.toString());
    return Pair.of(null, true);
}
// Partition migration in progress: mark the collection routing map stale so it is
// re-resolved, and retry. Right=true → caller also forces an address cache refresh.
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) {
    logger.debug("Received PartitionIsMigratingException, will retry, {}", exception.toString());
    this.request.forceCollectionRoutingMapRefresh = true;
    return Pair.of(null, true);
}
// Partition key range split: drop all quorum/LSN state captured for the old range,
// request a partition key range refresh, and retry.
// Right=false → no address cache refresh is forced for this case.
private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) {
    this.request.requestContext.resolvedPartitionKeyRange = null;
    this.request.requestContext.quorumSelectedLSN = -1;
    this.request.requestContext.quorumSelectedStoreResponse = null;
    logger.debug("Received partition key range splitting exception, will retry, {}", exception.toString());
    this.request.forcePartitionKeyRangeRefresh = true;
    return Pair.of(null, false);
}
// Stale name cache (e.g. collection deleted and recreated): reset per-request
// quorum/LSN state, then retry with a forced name cache refresh. Converts to a
// terminal ServiceUnavailable once the dedicated counter exceeds 2.
private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) {
    this.request.requestContext.quorumSelectedLSN = -1;
    this.request.requestContext.resolvedPartitionKeyRange = null;
    this.request.requestContext.quorumSelectedStoreResponse = null;
    this.request.requestContext.globalCommittedSelectedLSN = -1;
    // NOTE(review): the warn message says "second" but the guard is > 2, so this
    // branch fires on the third observed InvalidPartitionException (assuming the
    // counter starts at 1, like the other attempt counters) — confirm the
    // intended retry budget and wording.
    if (this.attemptCountInvalidPartition.getAndIncrement() > 2) {
        logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}",
            exception.toString());
        return Pair.of(
            Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE_EXCEEDED_RETRY_LIMIT))),
            false);
    }
    logger.debug("Received invalid collection exception, will retry, {}", exception.toString());
    this.request.forceNameCacheRefresh = true;
    // Right=false → no address cache refresh needed; the name cache refresh suffices.
    return Pair.of(null, false);
}
}
/**
 * Retry policy applied when the service responds with {@code RetryWithException}
 * (transient, service-directed retry). Backs off exponentially with a small random
 * salt, bounded by a caller-supplied overall wait-time budget shared with the
 * enclosing {@code GoneAndRetryWithRetryPolicy}'s elapsed-time clock.
 */
class RetryWithRetryPolicy implements IRetryPolicy {
    // Overall retry budget used when the caller passes no explicit wait time.
    private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30;
    // Cap for a single backoff; also reused as the fallback per-attempt timeout.
    private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 1000;
    private final static int INITIAL_BACKOFF_TIME_MS = 10;
    private final static int BACK_OFF_MULTIPLIER = 2;
    // Jitter added to each backoff to de-synchronize concurrent retries.
    private final static int RANDOM_SALT_IN_MS = 5;
    private final AtomicInteger attemptCount = new AtomicInteger(1);
    private final AtomicInteger currentBackoffMilliseconds = new AtomicInteger(RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS);
    private final int waitTimeInSeconds;
    private final RetryContext retryContext;

    public RetryWithRetryPolicy(Integer waitTimeInSeconds, RetryContext retryContext) {
        this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS;
        this.retryContext = retryContext;
    }

    /**
     * Decides whether the failed attempt should be retried and, if so, after what
     * backoff and with what per-attempt timeout. Only RetryWithException qualifies.
     */
    @Override
    public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
        Duration backoffTime;
        Duration timeout;
        if (!(exception instanceof RetryWithException)) {
            logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount.get(),
                exception);
            return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
        }
        RetryWithException lastRetryWithException = (RetryWithException)exception;
        // Remember the latest RetryWithException on the enclosing policy for diagnostics.
        GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException;
        // Remaining budget = configured wait time minus time already elapsed overall.
        long remainingMilliseconds =
            (this.waitTimeInSeconds * 1_000L) -
                GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis();
        int currentRetryAttemptCount = this.attemptCount.getAndIncrement();
        if (remainingMilliseconds <= 0) {
            logger.warn("Received RetryWithException after backoff/retry. Will fail the request.",
                lastRetryWithException);
            return Mono.just(ShouldRetryResult.error(lastRetryWithException));
        }
        // Backoff = min(current backoff + jitter, remaining budget, hard cap).
        backoffTime = Duration.ofMillis(
            Math.min(
                Math.min(this.currentBackoffMilliseconds.get() + random.nextInt(RANDOM_SALT_IN_MS), remainingMilliseconds),
                RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS));
        // Double the backoff for next time, clamped to [initial, max].
        // NOTE(review): get() followed by set() is not atomic; fine if shouldRetry is
        // invoked sequentially per request — confirm no concurrent callers.
        this.currentBackoffMilliseconds.set(
            Math.max(
                RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS,
                Math.min(
                    RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS,
                    this.currentBackoffMilliseconds.get() * RetryWithRetryPolicy.BACK_OFF_MULTIPLIER))
        );
        logger.debug("BackoffTime: {} ms.", backoffTime.toMillis());
        // Per-attempt timeout = whatever budget remains after the backoff completes.
        long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis();
        timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec)
            : Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS);
        logger.debug("Received RetryWithException, will retry, ", exception);
        // First Quadruple element false → the caller will not force an address
        // cache refresh for this retry (cf. the Quadruple built in GoneRetryPolicy).
        return Mono.just(ShouldRetryResult.retryAfter(backoffTime,
            Quadruple.with(false, true, timeout, currentRetryAttemptCount)));
    }

    @Override
    public RetryContext getRetryContext() {
        return this.retryContext;
    }
}
} |
Why does this need a cast? | public Mono<AccessToken> getToken(TokenRequestContext request) {
return Mono.defer(() -> {
isCachePopulated = isCachePopulated(request);
if (isCachePopulated) {
if (useConfidentialClient) {
return identityClient.authenticateWithConfidentialClientCache(request, cachedToken.get())
.map(accessToken -> (MsalToken) accessToken);
} else {
return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
.onErrorResume(t -> Mono.empty());
}
} else {
return Mono.empty();
}
}).switchIfEmpty(
Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
.map(msalToken -> {
cachedToken.set(new MsalAuthenticationAccount(
new AuthenticationRecord(msalToken.getAuthenticationResult(),
identityClient.getTenantId(), identityClient.getClientId())));
if (request.isCaeEnabled()) {
isCaeEnabledRequestCached = true;
} else {
isCaeDisabledRequestCached = true;
}
return (AccessToken) msalToken;
})
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
} | .map(accessToken -> (MsalToken) accessToken); | public Mono<AccessToken> getToken(TokenRequestContext request) {
return Mono.defer(() -> {
isCachePopulated = isCachePopulated(request);
if (isCachePopulated) {
if (useConfidentialClient) {
return identityClient.authenticateWithConfidentialClientCache(request, cachedToken.get())
.map(accessToken -> (MsalToken) accessToken);
} else {
return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
.onErrorResume(t -> Mono.empty());
}
} else {
return Mono.empty();
}
}).switchIfEmpty(
Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
.map(msalToken -> {
cachedToken.set(new MsalAuthenticationAccount(
new AuthenticationRecord(msalToken.getAuthenticationResult(),
identityClient.getTenantId(), identityClient.getClientId())));
if (request.isCaeEnabled()) {
isCaeEnabledRequestCached = true;
} else {
isCaeDisabledRequestCached = true;
}
return (AccessToken) msalToken;
})
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
} | class AuthorizationCodeCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(AuthorizationCodeCredential.class);
private final String authCode;
private final URI redirectUri;
private final IdentityClient identityClient;
private final AtomicReference<MsalAuthenticationAccount> cachedToken;
private boolean isCaeEnabledRequestCached;
private boolean isCaeDisabledRequestCached;
private boolean isCachePopulated;
private final boolean useConfidentialClient;
/**
* Creates an AuthorizationCodeCredential with the given identity client options.
*
* @param clientId the client ID of the application
* @param clientSecret the client secret of the application
* @param tenantId the tenant ID of the application
* @param authCode the Oauth 2.0 authorization code grant
* @param redirectUri the redirect URI used to authenticate to Azure Active Directory
* @param identityClientOptions the options for configuring the identity client
*/
AuthorizationCodeCredential(String clientId, String clientSecret, String tenantId, String authCode,
URI redirectUri, IdentityClientOptions identityClientOptions) {
identityClient = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.identityClientOptions(identityClientOptions)
.build();
this.cachedToken = new AtomicReference<>();
this.authCode = authCode;
this.redirectUri = redirectUri;
this.useConfidentialClient = !CoreUtils.isNullOrEmpty(clientSecret);
}
/**
 * Whether the incoming request can be served from the cached token: a token must be
 * cached AND a token of the request's CAE (Continuous Access Evaluation) flavor must
 * have been cached previously.
 * Fix: removed the erroneous {@code @Override} — a private method overrides nothing,
 * so the annotation is a compile error (JLS 9.6.4.4).
 */
private boolean isCachePopulated(TokenRequestContext request) {
    return (cachedToken.get() != null) && ((request.isCaeEnabled() && isCaeEnabledRequestCached)
        || (!request.isCaeEnabled() && isCaeDisabledRequestCached));
}
} | class AuthorizationCodeCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(AuthorizationCodeCredential.class);
private final String authCode;
private final URI redirectUri;
private final IdentityClient identityClient;
private final AtomicReference<MsalAuthenticationAccount> cachedToken;
private boolean isCaeEnabledRequestCached;
private boolean isCaeDisabledRequestCached;
private boolean isCachePopulated;
private final boolean useConfidentialClient;
/**
* Creates an AuthorizationCodeCredential with the given identity client options.
*
* @param clientId the client ID of the application
* @param clientSecret the client secret of the application
* @param tenantId the tenant ID of the application
* @param authCode the Oauth 2.0 authorization code grant
* @param redirectUri the redirect URI used to authenticate to Azure Active Directory
* @param identityClientOptions the options for configuring the identity client
*/
AuthorizationCodeCredential(String clientId, String clientSecret, String tenantId, String authCode,
URI redirectUri, IdentityClientOptions identityClientOptions) {
identityClient = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.identityClientOptions(identityClientOptions)
.build();
this.cachedToken = new AtomicReference<>();
this.authCode = authCode;
this.redirectUri = redirectUri;
this.useConfidentialClient = !CoreUtils.isNullOrEmpty(clientSecret);
}
/**
 * Whether the incoming request can be served from the cached token: a token must be
 * cached AND a token of the request's CAE (Continuous Access Evaluation) flavor must
 * have been cached previously.
 * Fix: removed the erroneous {@code @Override} — a private method overrides nothing,
 * so the annotation is a compile error (JLS 9.6.4.4).
 */
private boolean isCachePopulated(TokenRequestContext request) {
    return (cachedToken.get() != null) && ((request.isCaeEnabled() && isCaeEnabledRequestCached)
        || (!request.isCaeEnabled() && isCaeDisabledRequestCached));
}
} |
The reactor flow requires msal token type to be consistent with downstream. The cast helps to satisfy that. | public Mono<AccessToken> getToken(TokenRequestContext request) {
return Mono.defer(() -> {
isCachePopulated = isCachePopulated(request);
if (isCachePopulated) {
if (useConfidentialClient) {
return identityClient.authenticateWithConfidentialClientCache(request, cachedToken.get())
.map(accessToken -> (MsalToken) accessToken);
} else {
return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
.onErrorResume(t -> Mono.empty());
}
} else {
return Mono.empty();
}
}).switchIfEmpty(
Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
.map(msalToken -> {
cachedToken.set(new MsalAuthenticationAccount(
new AuthenticationRecord(msalToken.getAuthenticationResult(),
identityClient.getTenantId(), identityClient.getClientId())));
if (request.isCaeEnabled()) {
isCaeEnabledRequestCached = true;
} else {
isCaeDisabledRequestCached = true;
}
return (AccessToken) msalToken;
})
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
} | .map(accessToken -> (MsalToken) accessToken); | public Mono<AccessToken> getToken(TokenRequestContext request) {
return Mono.defer(() -> {
isCachePopulated = isCachePopulated(request);
if (isCachePopulated) {
if (useConfidentialClient) {
return identityClient.authenticateWithConfidentialClientCache(request, cachedToken.get())
.map(accessToken -> (MsalToken) accessToken);
} else {
return identityClient.authenticateWithPublicClientCache(request, cachedToken.get())
.onErrorResume(t -> Mono.empty());
}
} else {
return Mono.empty();
}
}).switchIfEmpty(
Mono.defer(() -> identityClient.authenticateWithAuthorizationCode(request, authCode, redirectUri)))
.map(msalToken -> {
cachedToken.set(new MsalAuthenticationAccount(
new AuthenticationRecord(msalToken.getAuthenticationResult(),
identityClient.getTenantId(), identityClient.getClientId())));
if (request.isCaeEnabled()) {
isCaeEnabledRequestCached = true;
} else {
isCaeDisabledRequestCached = true;
}
return (AccessToken) msalToken;
})
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
} | class AuthorizationCodeCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(AuthorizationCodeCredential.class);
private final String authCode;
private final URI redirectUri;
private final IdentityClient identityClient;
private final AtomicReference<MsalAuthenticationAccount> cachedToken;
private boolean isCaeEnabledRequestCached;
private boolean isCaeDisabledRequestCached;
private boolean isCachePopulated;
private final boolean useConfidentialClient;
/**
* Creates an AuthorizationCodeCredential with the given identity client options.
*
* @param clientId the client ID of the application
* @param clientSecret the client secret of the application
* @param tenantId the tenant ID of the application
* @param authCode the Oauth 2.0 authorization code grant
* @param redirectUri the redirect URI used to authenticate to Azure Active Directory
* @param identityClientOptions the options for configuring the identity client
*/
AuthorizationCodeCredential(String clientId, String clientSecret, String tenantId, String authCode,
URI redirectUri, IdentityClientOptions identityClientOptions) {
identityClient = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.identityClientOptions(identityClientOptions)
.build();
this.cachedToken = new AtomicReference<>();
this.authCode = authCode;
this.redirectUri = redirectUri;
this.useConfidentialClient = !CoreUtils.isNullOrEmpty(clientSecret);
}
/**
 * Whether the incoming request can be served from the cached token: a token must be
 * cached AND a token of the request's CAE (Continuous Access Evaluation) flavor must
 * have been cached previously.
 * Fix: removed the erroneous {@code @Override} — a private method overrides nothing,
 * so the annotation is a compile error (JLS 9.6.4.4).
 */
private boolean isCachePopulated(TokenRequestContext request) {
    return (cachedToken.get() != null) && ((request.isCaeEnabled() && isCaeEnabledRequestCached)
        || (!request.isCaeEnabled() && isCaeDisabledRequestCached));
}
} | class AuthorizationCodeCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(AuthorizationCodeCredential.class);
private final String authCode;
private final URI redirectUri;
private final IdentityClient identityClient;
private final AtomicReference<MsalAuthenticationAccount> cachedToken;
private boolean isCaeEnabledRequestCached;
private boolean isCaeDisabledRequestCached;
private boolean isCachePopulated;
private final boolean useConfidentialClient;
/**
* Creates an AuthorizationCodeCredential with the given identity client options.
*
* @param clientId the client ID of the application
* @param clientSecret the client secret of the application
* @param tenantId the tenant ID of the application
* @param authCode the Oauth 2.0 authorization code grant
* @param redirectUri the redirect URI used to authenticate to Azure Active Directory
* @param identityClientOptions the options for configuring the identity client
*/
AuthorizationCodeCredential(String clientId, String clientSecret, String tenantId, String authCode,
URI redirectUri, IdentityClientOptions identityClientOptions) {
identityClient = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.identityClientOptions(identityClientOptions)
.build();
this.cachedToken = new AtomicReference<>();
this.authCode = authCode;
this.redirectUri = redirectUri;
this.useConfidentialClient = !CoreUtils.isNullOrEmpty(clientSecret);
}
/**
 * Whether the incoming request can be served from the cached token: a token must be
 * cached AND a token of the request's CAE (Continuous Access Evaluation) flavor must
 * have been cached previously.
 * Fix: removed the erroneous {@code @Override} — a private method overrides nothing,
 * so the annotation is a compile error (JLS 9.6.4.4).
 */
private boolean isCachePopulated(TokenRequestContext request) {
    return (cachedToken.get() != null) && ((request.isCaeEnabled() && isCaeEnabledRequestCached)
        || (!request.isCaeEnabled() && isCaeDisabledRequestCached));
}
} |
Should this be the first statement in this method? | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions, propertiesProvider);
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature, featureName);
if (!feature.getEvaluate() && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
Mono<Boolean> isEnabled;
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
isEnabled = Flux.merge(results).reduce((a, b) -> a && b).single();
} else {
isEnabled = Flux.merge(results).reduce((a, b) -> a || b).single();
}
return isEnabled.flatMap(enabled -> {
if (!enabled && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | } | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature);
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions,
propertiesProvider);
String defaultDisabledVariant = feature.getAllocation().getDefaultWhenDisabled();
if (!feature.getEvaluate() && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
return evaluateFeatureFlagResults(feature, results).flatMap(enabled -> {
if (!enabled && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
 * Can be called to check if a feature is enabled or disabled.
 *
 * @param context ApplicationContext
 * @param featureManagementConfigurations Configuration Properties for Feature Flags
 * @param properties FeatureManagementConfigProperties
 * @param contextAccessor accessor supplying the targeting context used for variant assignment
 * @param evaluationOptions options controlling how targeting is evaluated
 * @param propertiesProvider provider of the configured VariantProperties beans
 */
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
    FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
    TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
    // Plain field wiring; collaborators are supplied by the Spring configuration
    // that constructs this bean.
    this.propertiesProvider = propertiesProvider;
    this.evaluationOptions = evaluationOptions;
    this.contextAccessor = contextAccessor;
    this.properties = properties;
    this.featureManagementConfigurations = featureManagementConfigurations;
    this.context = context;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
    // Reactive variant; no local feature context is supplied to the filters.
    return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
    // Blocking variant of isEnabledAsync: blocks the caller until evaluation completes.
    return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
    // Blocking variant assignment without a local feature context.
    return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
    // Blocking variant assignment; featureContext is forwarded to contextual filters.
    return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
    // NOTE(review): generateVariant can complete empty (feature disabled with no
    // defaultWhenDisabled); single() then errors with NoSuchElementException —
    // confirm that is the intended contract.
    return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
    // NOTE(review): single() errors if generateVariant completes empty (feature
    // disabled with no defaultWhenDisabled) — confirm intended.
    return generateVariant(feature, featureContext).single();
}
/**
 * Evaluates a feature flag. Plain on/off flags win over filtered definitions; a
 * filtered feature is on when its filters pass per the feature's requirement type
 * ("All" = every filter must pass, otherwise any one suffices).
 *
 * @param feature name of the feature flag
 * @param featureContext optional local context forwarded to contextual filters
 * @return true/false evaluation result; false when the feature is unknown
 * @throws FilterNotFoundException when fail-fast is set and a filter bean is missing
 */
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
    // No feature configuration bound at all -> everything evaluates to off.
    if (featureManagementConfigurations.getFeatureManagement() == null
        || featureManagementConfigurations.getOnOff() == null) {
        return Mono.just(false);
    }
    // Simple boolean flags take precedence over filtered feature definitions.
    Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
    if (boolFeature != null) {
        return Mono.just(boolFeature);
    }
    Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
    if (featureItem == null || !featureItem.getEvaluate()) {
        return Mono.just(false);
    }
    // Evaluate every named filter; entries without a name are ignored.
    List<Mono<Boolean>> results = new ArrayList<>();
    for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
        if (StringUtils.hasText(featureFilter.getName())) {
            results.add(isFeatureOn(featureFilter, feature, featureContext));
        }
    }
    if (results.size() == 0) {
        return Mono.just(false);
    }
    // Constant-first equals avoids an NPE when no requirement type is configured,
    // matching the constant-first comparison style used for variant evaluation.
    if ("All".equals(featureItem.getRequirementType())) {
        return Flux.merge(results).reduce((a, b) -> a && b).single();
    }
    return Flux.merge(results).reduce((a, b) -> a || b).single();
}
/**
 * Validates that a feature declares usable variants: at least one variant, each
 * carrying a name and either a configuration value or a configuration reference.
 *
 * @param feature the feature whose variant definitions are validated
 * @param featureName the requested feature name (currently unused; kept so existing
 *     call sites remain source-compatible)
 * @throws FeatureManagementException when a variant definition is incomplete
 */
private void validateVariant(Feature feature, String featureName) {
    if (feature.getVariants() == null || feature.getVariants().size() == 0) {
        throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
    }
    for (VariantReference variant : feature.getVariants().values()) {
        if (!StringUtils.hasText(variant.getName())) {
            throw new FeatureManagementException("Variant needs a name");
        }
        if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
            // Fixed typo in the thrown message: "neededs" -> "needs".
            throw new FeatureManagementException(
                "The feature " + feature.getKey() + " needs a Configuration Value or Configuration Reference.");
        }
    }
}
// Resolves the filter bean by name from the Spring context and evaluates it.
// Supports the four filter contracts: sync/async crossed with plain/contextual.
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
    try {
        Object featureFilter = context.getBean(filter.getName());
        filter.setFeatureName(feature);
        if (featureFilter instanceof FeatureFilter) {
            return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
        } else if (featureFilter instanceof ContextualFeatureFilter) {
            return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
        } else if (featureFilter instanceof FeatureFilterAsync) {
            return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
        } else if (featureFilter instanceof ContextualFeatureFilterAsync) {
            return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
        }
    } catch (NoSuchBeanDefinitionException e) {
        LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
            filter.getName());
        // Fail-fast converts a missing filter bean into FilterNotFoundException;
        // otherwise the missing filter silently evaluates to false below.
        if (properties.isFailFast()) {
            String message = "Fail fast is set and a Filter was unable to be found";
            ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
        }
    }
    // Unknown filter contract, or missing bean without fail-fast: treat as off.
    return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
/**
 * Returns the names of all feature flags, combining plain on/off flags with
 * filtered feature definitions.
 */
public Set<String> getAllFeatureNames() {
    Set<String> featureNames = new HashSet<>(featureManagementConfigurations.getOnOff().keySet());
    featureNames.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
    return featureNames;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
    // Delegates to the bound configuration properties.
    return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
    // Delegates to the bound configuration properties.
    return featureManagementConfigurations.getOnOff();
}
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
 * Can be called to check if a feature is enabled or disabled.
 *
 * @param context ApplicationContext
 * @param featureManagementConfigurations Configuration Properties for Feature Flags
 * @param properties FeatureManagementConfigProperties
 * @param contextAccessor accessor supplying the targeting context used for variant assignment
 * @param evaluationOptions options controlling how targeting is evaluated
 * @param propertiesProvider provider of the configured VariantProperties beans
 */
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
    FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
    TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
    // Plain field wiring; collaborators are supplied by the Spring configuration
    // that constructs this bean.
    this.propertiesProvider = propertiesProvider;
    this.evaluationOptions = evaluationOptions;
    this.contextAccessor = contextAccessor;
    this.properties = properties;
    this.featureManagementConfigurations = featureManagementConfigurations;
    this.context = context;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
    // Reactive variant; no local feature context is supplied to the filters.
    return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
    // Blocking variant of isEnabledAsync: blocks the caller until evaluation completes.
    return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
    // Blocking variant assignment without a local feature context.
    return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
    // Blocking variant assignment; featureContext is forwarded to contextual filters.
    return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
    // NOTE(review): generateVariant can complete empty (feature disabled with no
    // defaultWhenDisabled); single() then errors with NoSuchElementException —
    // confirm that is the intended contract.
    return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
    // NOTE(review): single() errors if generateVariant completes empty (feature
    // disabled with no defaultWhenDisabled) — confirm intended.
    return generateVariant(feature, featureContext).single();
}
// Evaluates a feature flag: plain on/off flags win over filtered definitions;
// a filtered feature is combined per its requirement type via
// evaluateFeatureFlagResults.
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
    // No feature configuration bound at all -> everything evaluates to off.
    if (featureManagementConfigurations.getFeatureManagement() == null
        || featureManagementConfigurations.getOnOff() == null) {
        return Mono.just(false);
    }
    // Simple boolean flags take precedence over filtered feature definitions.
    Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
    if (boolFeature != null) {
        return Mono.just(boolFeature);
    }
    Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
    if (featureItem == null || !featureItem.getEvaluate()) {
        return Mono.just(false);
    }
    // Evaluate every named filter; entries without a name are ignored.
    List<Mono<Boolean>> results = new ArrayList<>();
    for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
        if (StringUtils.hasText(featureFilter.getName())) {
            results.add(isFeatureOn(featureFilter, feature, featureContext));
        }
    }
    if (results.size() == 0) {
        return Mono.just(false);
    }
    return evaluateFeatureFlagResults(featureItem, results);
}
// Combines per-filter results according to the feature's requirement type:
// ALL_REQUIREMENT_TYPE -> logical AND across filters; anything else -> logical OR.
private Mono<Boolean> evaluateFeatureFlagResults(Feature feature, List<Mono<Boolean>> results) {
    if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
        return Flux.merge(results).reduce((a, b) -> a && b).single();
    }
    return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " needs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} |
This line can be put after `validateVariant(feature, featureName);` | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions, propertiesProvider);
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature, featureName);
if (!feature.getEvaluate() && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
Mono<Boolean> isEnabled;
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
isEnabled = Flux.merge(results).reduce((a, b) -> a && b).single();
} else {
isEnabled = Flux.merge(results).reduce((a, b) -> a || b).single();
}
return isEnabled.flatMap(enabled -> {
if (!enabled && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions, propertiesProvider); | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature);
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions,
propertiesProvider);
String defaultDisabledVariant = feature.getAllocation().getDefaultWhenDisabled();
if (!feature.getEvaluate() && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
return evaluateFeatureFlagResults(feature, results).flatMap(enabled -> {
if (!enabled && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
if (featureItem.getRequirementType().equals("All")) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature, String featureName) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " neededs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
return evaluateFeatureFlagResults(featureItem, results);
}
private Mono<Boolean> evaluateFeatureFlagResults(Feature feature, List<Mono<Boolean>> results) {
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " needs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} |
Seems like the `feature.getAllocation().getDefautlWhenDisabled()` is used in several places, so I suggest we extract a variable for it. | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions, propertiesProvider);
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature, featureName);
if (!feature.getEvaluate() && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
Mono<Boolean> isEnabled;
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
isEnabled = Flux.merge(results).reduce((a, b) -> a && b).single();
} else {
isEnabled = Flux.merge(results).reduce((a, b) -> a || b).single();
}
return isEnabled.flatMap(enabled -> {
if (!enabled && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | if (!feature.getEvaluate() && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) { | private Mono<Variant> generateVariant(String featureName, Object featureContext) {
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature);
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions,
propertiesProvider);
String defaultDisabledVariant = feature.getAllocation().getDefaultWhenDisabled();
if (!feature.getEvaluate() && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
return evaluateFeatureFlagResults(feature, results).flatMap(enabled -> {
if (!enabled && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
if (featureItem.getRequirementType().equals("All")) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature, String featureName) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " neededs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
return evaluateFeatureFlagResults(featureItem, results);
}
private Mono<Boolean> evaluateFeatureFlagResults(Feature feature, List<Mono<Boolean>> results) {
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " needs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} |
This can be extracted to a method, I am seeing the similar below: ```java // All Filters must be true if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) { isEnabled = Flux.merge(results).reduce((a, b) -> a && b).single(); } else { // Any Filter must be true isEnabled = Flux.merge(results).reduce((a, b) -> a || b).single(); } ``` | private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
if (featureItem.getRequirementType().equals("All")) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
} | return Flux.merge(results).reduce((a, b) -> a || b).single(); | private Mono<Boolean> checkFeature(String feature, Object featureContext) throws FilterNotFoundException {
if (featureManagementConfigurations.getFeatureManagement() == null
|| featureManagementConfigurations.getOnOff() == null) {
return Mono.just(false);
}
Boolean boolFeature = featureManagementConfigurations.getOnOff().get(feature);
if (boolFeature != null) {
return Mono.just(boolFeature);
}
Feature featureItem = featureManagementConfigurations.getFeatureManagement().get(feature);
if (featureItem == null || !featureItem.getEvaluate()) {
return Mono.just(false);
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : featureItem.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature, featureContext));
}
}
if (results.size() == 0) {
return Mono.just(false);
}
return evaluateFeatureFlagResults(featureItem, results);
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Variant> generateVariant(String featureName, Object featureContext) {
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions, propertiesProvider);
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature, featureName);
if (!feature.getEvaluate() && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
Mono<Boolean> isEnabled;
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
isEnabled = Flux.merge(results).reduce((a, b) -> a && b).single();
} else {
isEnabled = Flux.merge(results).reduce((a, b) -> a || b).single();
}
return isEnabled.flatMap(enabled -> {
if (!enabled && StringUtils.hasText(feature.getAllocation().getDefautlWhenDisabled())) {
return variantAssignment.getVariant(feature.getVariants().values(),
feature.getAllocation().getDefautlWhenDisabled()).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
}
private void validateVariant(Feature feature, String featureName) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " neededs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} | class FeatureManager {
private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManager.class);
private transient ApplicationContext context;
private final FeatureManagementProperties featureManagementConfigurations;
private transient FeatureManagementConfigProperties properties;
private final TargetingContextAccessor contextAccessor;
private final TargetingEvaluationOptions evaluationOptions;
private final ObjectProvider<VariantProperties> propertiesProvider;
/**
* Can be called to check if a feature is enabled or disabled.
*
* @param context ApplicationContext
* @param featureManagementConfigurations Configuration Properties for Feature Flags
* @param properties FeatureManagementConfigProperties
*/
FeatureManager(ApplicationContext context, FeatureManagementProperties featureManagementConfigurations,
FeatureManagementConfigProperties properties, TargetingContextAccessor contextAccessor,
TargetingEvaluationOptions evaluationOptions, ObjectProvider<VariantProperties> propertiesProvider) {
this.context = context;
this.featureManagementConfigurations = featureManagementConfigurations;
this.properties = properties;
this.contextAccessor = contextAccessor;
this.evaluationOptions = evaluationOptions;
this.propertiesProvider = propertiesProvider;
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Mono<Boolean> isEnabledAsync(String feature) {
return checkFeature(feature, null);
}
/**
* Checks to see if the feature is enabled. If enabled it check each filter, once a single filter returns true it
* returns true. If no filter returns true, it returns false. If there are no filters, it returns true. If feature
* isn't found it returns false.
*
* @param feature Feature being checked.
* @return state of the feature
* @throws FilterNotFoundException file not found
*/
public Boolean isEnabled(String feature) throws FilterNotFoundException {
return checkFeature(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Variant getVariant(String feature) {
return generateVariant(feature, null).block();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Variant getVariant(String feature, Object featureContext) {
return generateVariant(feature, featureContext).block();
}
/**
*
* @param feature Feature being checked.
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature) {
return generateVariant(feature, null).single();
}
/**
*
* @param feature Feature being checked.
* @param featureContext Local context
* @return Assigned Variant
*/
public Mono<Variant> getVariantAsync(String feature, Object featureContext) {
return generateVariant(feature, featureContext).single();
}
private Mono<Variant> generateVariant(String featureName, Object featureContext) {
if (!StringUtils.hasText(featureName)) {
throw new IllegalArgumentException("Feature Variant name can not be empty or null.");
}
Feature feature = featureManagementConfigurations.getFeatureManagement().get(featureName);
if (feature == null) {
throw new FeatureManagementException("The Feature " + featureName + " can not be found.");
}
validateVariant(feature);
VariantAssignment variantAssignment = new VariantAssignment(contextAccessor, evaluationOptions,
propertiesProvider);
String defaultDisabledVariant = feature.getAllocation().getDefaultWhenDisabled();
if (!feature.getEvaluate() && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!feature.getEvaluate()) {
return Mono.justOrEmpty(null);
} else if (feature.getEnabledFor().size() == 0) {
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
}
List<Mono<Boolean>> results = new ArrayList<>();
for (FeatureFilterEvaluationContext featureFilter : feature.getEnabledFor().values()) {
if (StringUtils.hasText(featureFilter.getName())) {
results.add(isFeatureOn(featureFilter, feature.getKey(), featureContext));
}
}
return evaluateFeatureFlagResults(feature, results).flatMap(enabled -> {
if (!enabled && StringUtils.hasText(defaultDisabledVariant)) {
return variantAssignment.getVariant(feature.getVariants().values(), defaultDisabledVariant).single();
} else if (!enabled) {
return Mono.justOrEmpty(null);
}
return variantAssignment.assignVariant(feature.getAllocation(), feature.getVariants().values());
});
}
private Mono<Boolean> evaluateFeatureFlagResults(Feature feature, List<Mono<Boolean>> results) {
if (ALL_REQUIREMENT_TYPE.equals(feature.getRequirementType())) {
return Flux.merge(results).reduce((a, b) -> a && b).single();
}
return Flux.merge(results).reduce((a, b) -> a || b).single();
}
private void validateVariant(Feature feature) {
if (feature.getVariants() == null || feature.getVariants().size() == 0) {
throw new FeatureManagementException("The feature " + feature.getKey() + " has no assigned Variants.");
}
for (VariantReference variant : feature.getVariants().values()) {
if (!StringUtils.hasText(variant.getName())) {
throw new FeatureManagementException("Variant needs a name");
}
if (variant.getConfigurationValue() == null && variant.getConfigurationReference() == null) {
throw new FeatureManagementException(
"The feature " + feature.getKey() + " needs a Configuration Value or Configuration Reference.");
}
}
}
private Mono<Boolean> isFeatureOn(FeatureFilterEvaluationContext filter, String feature, Object featureContext) {
try {
Object featureFilter = context.getBean(filter.getName());
filter.setFeatureName(feature);
if (featureFilter instanceof FeatureFilter) {
return Mono.just(((FeatureFilter) featureFilter).evaluate(filter));
} else if (featureFilter instanceof ContextualFeatureFilter) {
return Mono.just(((ContextualFeatureFilter) featureFilter).evaluate(filter, featureContext));
} else if (featureFilter instanceof FeatureFilterAsync) {
return ((FeatureFilterAsync) featureFilter).evaluateAsync(filter);
} else if (featureFilter instanceof ContextualFeatureFilterAsync) {
return ((ContextualFeatureFilterAsync) featureFilter).evaluateAsync(filter, featureContext);
}
} catch (NoSuchBeanDefinitionException e) {
LOGGER.error("Was unable to find Filter {}. Does the class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
}
}
return Mono.just(false);
}
/**
* Returns the names of all features flags
*
* @return a set of all feature names
*/
public Set<String> getAllFeatureNames() {
Set<String> allFeatures = new HashSet<>();
allFeatures.addAll(featureManagementConfigurations.getOnOff().keySet());
allFeatures.addAll(featureManagementConfigurations.getFeatureManagement().keySet());
return allFeatures;
}
/**
* @return the featureManagement
*/
Map<String, Feature> getFeatureManagement() {
return featureManagementConfigurations.getFeatureManagement();
}
/**
* @return the onOff
*/
Map<String, Boolean> getOnOff() {
return featureManagementConfigurations.getOnOff();
}
} |
What was the cause of the test failures? Was mockito incorrectly mocking this? | protected void beforeTest() {
String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = new MockTokenCredential();
schemaGroup = PLAYBACK_TEST_GROUP;
endpoint = PLAYBACK_ENDPOINT;
eventHubName = "javaeventhub";
connectionString = "foo-bar";
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE);
eventHubName = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME);
schemaGroup = System.getenv(SCHEMA_REGISTRY_GROUP);
connectionString = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING);
assertNotNull(eventHubName, "'eventHubName' cannot be null in LIVE/RECORD mode.");
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
assertNotNull(schemaGroup, "'schemaGroup' cannot be null in LIVE/RECORD mode.");
assertNotNull(connectionString, "'connectionString' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.fullyQualifiedNamespace(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
} | tokenCredential = new MockTokenCredential(); | protected void beforeTest() {
String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = new MockTokenCredential();
schemaGroup = PLAYBACK_TEST_GROUP;
endpoint = PLAYBACK_ENDPOINT;
eventHubName = "javaeventhub";
connectionString = "foo-bar";
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE);
eventHubName = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME);
schemaGroup = System.getenv(SCHEMA_REGISTRY_GROUP);
connectionString = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING);
assertNotNull(eventHubName, "'eventHubName' cannot be null in LIVE/RECORD mode.");
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
assertNotNull(schemaGroup, "'schemaGroup' cannot be null in LIVE/RECORD mode.");
assertNotNull(connectionString, "'connectionString' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.fullyQualifiedNamespace(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
} | class SchemaRegistryApacheAvroSerializerIntegrationTest extends TestProxyTestBase {
static final String SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE = "SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE";
static final String SCHEMA_REGISTRY_GROUP = "SCHEMA_REGISTRY_GROUP";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING";
static final String PLAYBACK_TEST_GROUP = "azsdk_java_group";
static final String PLAYBACK_ENDPOINT = "https:
private String schemaGroup;
private SchemaRegistryClientBuilder builder;
private String eventHubName;
private String connectionString;
@Override
@Override
protected void afterTest() {
Mockito.framework().clearInlineMock(this);
}
/**
* Verifies that we can register a schema, fetch it, and deserialize it.
*/
@Test
public void registerAndGetSchema() {
final SchemaRegistryClient registryClient = builder.buildClient();
final SchemaRegistryApacheAvroSerializer encoder = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final PlayingCard playingCard2 = PlayingCard.newBuilder()
.setCardValue(11)
.setIsFaceCard(true)
.setPlayingCardSuit(PlayingCardSuit.DIAMONDS)
.build();
final ArrayList<PlayingCard> allCards = new ArrayList<>();
allCards.add(playingCard);
allCards.add(playingCard2);
final HandOfCards cards = HandOfCards.newBuilder()
.setCards(allCards)
.build();
final Schema handOfCardsSchema = HandOfCards.SCHEMA$;
final SchemaProperties schemaProperties = registryClient.registerSchema(schemaGroup,
handOfCardsSchema.getFullName(), handOfCardsSchema.toString(), SchemaFormat.AVRO);
assertNotNull(schemaProperties);
final MessageContent encodedMessage = encoder.serialize(cards,
TypeReference.createInstance(MessageContent.class));
assertNotNull(encodedMessage);
final byte[] outputArray = encodedMessage.getBodyAsBinaryData().toBytes();
assertTrue(outputArray.length > 0, "There should have been contents in array.");
final HandOfCards actual = encoder.deserialize(encodedMessage,
TypeReference.createInstance(HandOfCards.class));
assertNotNull(actual);
assertNotNull(actual.getCards());
assertEquals(cards.getCards().size(), actual.getCards().size());
}
/**
* Verifies that an event can be sent to Event Hubs and deserialized.
*/
@Test
public void serializeAndDeserializeEventData() {
Assumptions.assumeFalse(interceptorManager.isPlaybackMode(),
"Cannot run this test in playback mode because it uses AMQP and Event Hubs calls.");
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.autoRegisterSchemas(true)
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final String uuid = UUID.randomUUID().toString();
final String applicationKey = "SCHEMA_REGISTRY_KEY";
final EventData event = serializer.serialize(playingCard, TypeReference.createInstance(EventData.class));
final String partitionId = "0";
event.getProperties().put(applicationKey, uuid);
EventHubProducerClient producer = null;
EventHubConsumerAsyncClient consumer = null;
try {
producer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.buildProducerClient();
consumer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
final PartitionProperties partitionProperties = producer.getPartitionProperties(partitionId);
final EventPosition last = EventPosition.fromSequenceNumber(partitionProperties.getLastEnqueuedSequenceNumber());
producer.send(Collections.singleton(event), new SendOptions().setPartitionId(partitionId));
StepVerifier.create(consumer.receiveFromPartition(partitionId, last).publishOn(Schedulers.boundedElastic()))
.assertNext(partitionEvent -> {
final PlayingCard deserialize = serializer.deserialize(partitionEvent.getData(),
TypeReference.createInstance(PlayingCard.class));
assertEquals(playingCard, deserialize);
})
.thenCancel()
.verify(Duration.ofMinutes(2));
} finally {
if (producer != null) {
producer.close();
}
if (consumer != null) {
consumer.close();
}
}
}
/**
* Tests that we auto-register and use cached versions.
*/
@Test
public void autoRegisterSchema() {
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.autoRegisterSchemas(true)
.buildSerializer();
final Person person = Person.newBuilder()
.setFavouriteColour("Blue")
.setFavouriteNumber(10)
.setName("Joe")
.build();
final MessageContent message = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message);
final MessageContent message2 = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message2);
final Person deserialized = serializer.deserialize(message, TypeReference.createInstance(Person.class));
assertEquals(person, deserialized);
}
} | class SchemaRegistryApacheAvroSerializerIntegrationTest extends TestProxyTestBase {
static final String SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE = "SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE";
static final String SCHEMA_REGISTRY_GROUP = "SCHEMA_REGISTRY_GROUP";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING";
static final String PLAYBACK_TEST_GROUP = "azsdk_java_group";
static final String PLAYBACK_ENDPOINT = "https:
private String schemaGroup;
private SchemaRegistryClientBuilder builder;
private String eventHubName;
private String connectionString;
@Override
@Override
protected void afterTest() {
Mockito.framework().clearInlineMock(this);
}
/**
* Verifies that we can register a schema, fetch it, and deserialize it.
*/
@Test
public void registerAndGetSchema() {
final SchemaRegistryClient registryClient = builder.buildClient();
final SchemaRegistryApacheAvroSerializer encoder = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final PlayingCard playingCard2 = PlayingCard.newBuilder()
.setCardValue(11)
.setIsFaceCard(true)
.setPlayingCardSuit(PlayingCardSuit.DIAMONDS)
.build();
final ArrayList<PlayingCard> allCards = new ArrayList<>();
allCards.add(playingCard);
allCards.add(playingCard2);
final HandOfCards cards = HandOfCards.newBuilder()
.setCards(allCards)
.build();
final Schema handOfCardsSchema = HandOfCards.SCHEMA$;
final SchemaProperties schemaProperties = registryClient.registerSchema(schemaGroup,
handOfCardsSchema.getFullName(), handOfCardsSchema.toString(), SchemaFormat.AVRO);
assertNotNull(schemaProperties);
final MessageContent encodedMessage = encoder.serialize(cards,
TypeReference.createInstance(MessageContent.class));
assertNotNull(encodedMessage);
final byte[] outputArray = encodedMessage.getBodyAsBinaryData().toBytes();
assertTrue(outputArray.length > 0, "There should have been contents in array.");
final HandOfCards actual = encoder.deserialize(encodedMessage,
TypeReference.createInstance(HandOfCards.class));
assertNotNull(actual);
assertNotNull(actual.getCards());
assertEquals(cards.getCards().size(), actual.getCards().size());
}
/**
* Verifies that an event can be sent to Event Hubs and deserialized.
*/
@Test
public void serializeAndDeserializeEventData() {
Assumptions.assumeFalse(interceptorManager.isPlaybackMode(),
"Cannot run this test in playback mode because it uses AMQP and Event Hubs calls.");
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.autoRegisterSchemas(true)
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final String uuid = UUID.randomUUID().toString();
final String applicationKey = "SCHEMA_REGISTRY_KEY";
final EventData event = serializer.serialize(playingCard, TypeReference.createInstance(EventData.class));
final String partitionId = "0";
event.getProperties().put(applicationKey, uuid);
EventHubProducerClient producer = null;
EventHubConsumerAsyncClient consumer = null;
try {
producer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.buildProducerClient();
consumer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
final PartitionProperties partitionProperties = producer.getPartitionProperties(partitionId);
final EventPosition last = EventPosition.fromSequenceNumber(partitionProperties.getLastEnqueuedSequenceNumber());
producer.send(Collections.singleton(event), new SendOptions().setPartitionId(partitionId));
StepVerifier.create(consumer.receiveFromPartition(partitionId, last).publishOn(Schedulers.boundedElastic()))
.assertNext(partitionEvent -> {
final PlayingCard deserialize = serializer.deserialize(partitionEvent.getData(),
TypeReference.createInstance(PlayingCard.class));
assertEquals(playingCard, deserialize);
})
.thenCancel()
.verify(Duration.ofMinutes(2));
} finally {
if (producer != null) {
producer.close();
}
if (consumer != null) {
consumer.close();
}
}
}
/**
* Tests that we auto-register and use cached versions.
*/
@Test
public void autoRegisterSchema() {
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.autoRegisterSchemas(true)
.buildSerializer();
final Person person = Person.newBuilder()
.setFavouriteColour("Blue")
.setFavouriteNumber(10)
.setName("Joe")
.build();
final MessageContent message = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message);
final MessageContent message2 = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message2);
final Person deserialized = serializer.deserialize(message, TypeReference.createInstance(Person.class));
assertEquals(person, deserialized);
}
} |
The mock was fine, this is just a better version than the mock as we don't care about the credential response during playback other than it didn't throw an error. This change was just a piece of reducing places where we use Mockito as there is a lot of baggage with it, especially given Mockito is no longer actively developing with a Java 8 baseline. | protected void beforeTest() {
String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = new MockTokenCredential();
schemaGroup = PLAYBACK_TEST_GROUP;
endpoint = PLAYBACK_ENDPOINT;
eventHubName = "javaeventhub";
connectionString = "foo-bar";
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE);
eventHubName = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME);
schemaGroup = System.getenv(SCHEMA_REGISTRY_GROUP);
connectionString = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING);
assertNotNull(eventHubName, "'eventHubName' cannot be null in LIVE/RECORD mode.");
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
assertNotNull(schemaGroup, "'schemaGroup' cannot be null in LIVE/RECORD mode.");
assertNotNull(connectionString, "'connectionString' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.fullyQualifiedNamespace(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
} | tokenCredential = new MockTokenCredential(); | protected void beforeTest() {
String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = new MockTokenCredential();
schemaGroup = PLAYBACK_TEST_GROUP;
endpoint = PLAYBACK_ENDPOINT;
eventHubName = "javaeventhub";
connectionString = "foo-bar";
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE);
eventHubName = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME);
schemaGroup = System.getenv(SCHEMA_REGISTRY_GROUP);
connectionString = System.getenv(SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING);
assertNotNull(eventHubName, "'eventHubName' cannot be null in LIVE/RECORD mode.");
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
assertNotNull(schemaGroup, "'schemaGroup' cannot be null in LIVE/RECORD mode.");
assertNotNull(connectionString, "'connectionString' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.fullyQualifiedNamespace(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
} | class SchemaRegistryApacheAvroSerializerIntegrationTest extends TestProxyTestBase {
static final String SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE = "SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE";
static final String SCHEMA_REGISTRY_GROUP = "SCHEMA_REGISTRY_GROUP";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING";
static final String PLAYBACK_TEST_GROUP = "azsdk_java_group";
static final String PLAYBACK_ENDPOINT = "https:
private String schemaGroup;
private SchemaRegistryClientBuilder builder;
private String eventHubName;
private String connectionString;
@Override
@Override
protected void afterTest() {
Mockito.framework().clearInlineMock(this);
}
/**
* Verifies that we can register a schema, fetch it, and deserialize it.
*/
@Test
public void registerAndGetSchema() {
final SchemaRegistryClient registryClient = builder.buildClient();
final SchemaRegistryApacheAvroSerializer encoder = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final PlayingCard playingCard2 = PlayingCard.newBuilder()
.setCardValue(11)
.setIsFaceCard(true)
.setPlayingCardSuit(PlayingCardSuit.DIAMONDS)
.build();
final ArrayList<PlayingCard> allCards = new ArrayList<>();
allCards.add(playingCard);
allCards.add(playingCard2);
final HandOfCards cards = HandOfCards.newBuilder()
.setCards(allCards)
.build();
final Schema handOfCardsSchema = HandOfCards.SCHEMA$;
final SchemaProperties schemaProperties = registryClient.registerSchema(schemaGroup,
handOfCardsSchema.getFullName(), handOfCardsSchema.toString(), SchemaFormat.AVRO);
assertNotNull(schemaProperties);
final MessageContent encodedMessage = encoder.serialize(cards,
TypeReference.createInstance(MessageContent.class));
assertNotNull(encodedMessage);
final byte[] outputArray = encodedMessage.getBodyAsBinaryData().toBytes();
assertTrue(outputArray.length > 0, "There should have been contents in array.");
final HandOfCards actual = encoder.deserialize(encodedMessage,
TypeReference.createInstance(HandOfCards.class));
assertNotNull(actual);
assertNotNull(actual.getCards());
assertEquals(cards.getCards().size(), actual.getCards().size());
}
/**
* Verifies that an event can be sent to Event Hubs and deserialized.
*/
@Test
public void serializeAndDeserializeEventData() {
Assumptions.assumeFalse(interceptorManager.isPlaybackMode(),
"Cannot run this test in playback mode because it uses AMQP and Event Hubs calls.");
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.autoRegisterSchemas(true)
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final String uuid = UUID.randomUUID().toString();
final String applicationKey = "SCHEMA_REGISTRY_KEY";
final EventData event = serializer.serialize(playingCard, TypeReference.createInstance(EventData.class));
final String partitionId = "0";
event.getProperties().put(applicationKey, uuid);
EventHubProducerClient producer = null;
EventHubConsumerAsyncClient consumer = null;
try {
producer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.buildProducerClient();
consumer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
final PartitionProperties partitionProperties = producer.getPartitionProperties(partitionId);
final EventPosition last = EventPosition.fromSequenceNumber(partitionProperties.getLastEnqueuedSequenceNumber());
producer.send(Collections.singleton(event), new SendOptions().setPartitionId(partitionId));
StepVerifier.create(consumer.receiveFromPartition(partitionId, last).publishOn(Schedulers.boundedElastic()))
.assertNext(partitionEvent -> {
final PlayingCard deserialize = serializer.deserialize(partitionEvent.getData(),
TypeReference.createInstance(PlayingCard.class));
assertEquals(playingCard, deserialize);
})
.thenCancel()
.verify(Duration.ofMinutes(2));
} finally {
if (producer != null) {
producer.close();
}
if (consumer != null) {
consumer.close();
}
}
}
/**
* Tests that we auto-register and use cached versions.
*/
@Test
public void autoRegisterSchema() {
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.autoRegisterSchemas(true)
.buildSerializer();
final Person person = Person.newBuilder()
.setFavouriteColour("Blue")
.setFavouriteNumber(10)
.setName("Joe")
.build();
final MessageContent message = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message);
final MessageContent message2 = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message2);
final Person deserialized = serializer.deserialize(message, TypeReference.createInstance(Person.class));
assertEquals(person, deserialized);
}
} | class SchemaRegistryApacheAvroSerializerIntegrationTest extends TestProxyTestBase {
static final String SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE = "SCHEMA_REGISTRY_AVRO_FULLY_QUALIFIED_NAMESPACE";
static final String SCHEMA_REGISTRY_GROUP = "SCHEMA_REGISTRY_GROUP";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_NAME";
static final String SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING = "SCHEMA_REGISTRY_AVRO_EVENT_HUB_CONNECTION_STRING";
static final String PLAYBACK_TEST_GROUP = "azsdk_java_group";
static final String PLAYBACK_ENDPOINT = "https:
private String schemaGroup;
private SchemaRegistryClientBuilder builder;
private String eventHubName;
private String connectionString;
@Override
@Override
protected void afterTest() {
Mockito.framework().clearInlineMock(this);
}
/**
* Verifies that we can register a schema, fetch it, and deserialize it.
*/
@Test
public void registerAndGetSchema() {
final SchemaRegistryClient registryClient = builder.buildClient();
final SchemaRegistryApacheAvroSerializer encoder = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final PlayingCard playingCard2 = PlayingCard.newBuilder()
.setCardValue(11)
.setIsFaceCard(true)
.setPlayingCardSuit(PlayingCardSuit.DIAMONDS)
.build();
final ArrayList<PlayingCard> allCards = new ArrayList<>();
allCards.add(playingCard);
allCards.add(playingCard2);
final HandOfCards cards = HandOfCards.newBuilder()
.setCards(allCards)
.build();
final Schema handOfCardsSchema = HandOfCards.SCHEMA$;
final SchemaProperties schemaProperties = registryClient.registerSchema(schemaGroup,
handOfCardsSchema.getFullName(), handOfCardsSchema.toString(), SchemaFormat.AVRO);
assertNotNull(schemaProperties);
final MessageContent encodedMessage = encoder.serialize(cards,
TypeReference.createInstance(MessageContent.class));
assertNotNull(encodedMessage);
final byte[] outputArray = encodedMessage.getBodyAsBinaryData().toBytes();
assertTrue(outputArray.length > 0, "There should have been contents in array.");
final HandOfCards actual = encoder.deserialize(encodedMessage,
TypeReference.createInstance(HandOfCards.class));
assertNotNull(actual);
assertNotNull(actual.getCards());
assertEquals(cards.getCards().size(), actual.getCards().size());
}
/**
* Verifies that an event can be sent to Event Hubs and deserialized.
*/
@Test
public void serializeAndDeserializeEventData() {
Assumptions.assumeFalse(interceptorManager.isPlaybackMode(),
"Cannot run this test in playback mode because it uses AMQP and Event Hubs calls.");
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.autoRegisterSchemas(true)
.avroSpecificReader(true)
.buildSerializer();
final PlayingCard playingCard = PlayingCard.newBuilder()
.setCardValue(1)
.setPlayingCardSuit(PlayingCardSuit.SPADES)
.setIsFaceCard(false)
.build();
final String uuid = UUID.randomUUID().toString();
final String applicationKey = "SCHEMA_REGISTRY_KEY";
final EventData event = serializer.serialize(playingCard, TypeReference.createInstance(EventData.class));
final String partitionId = "0";
event.getProperties().put(applicationKey, uuid);
EventHubProducerClient producer = null;
EventHubConsumerAsyncClient consumer = null;
try {
producer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.buildProducerClient();
consumer = new EventHubClientBuilder()
.connectionString(connectionString, eventHubName)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
final PartitionProperties partitionProperties = producer.getPartitionProperties(partitionId);
final EventPosition last = EventPosition.fromSequenceNumber(partitionProperties.getLastEnqueuedSequenceNumber());
producer.send(Collections.singleton(event), new SendOptions().setPartitionId(partitionId));
StepVerifier.create(consumer.receiveFromPartition(partitionId, last).publishOn(Schedulers.boundedElastic()))
.assertNext(partitionEvent -> {
final PlayingCard deserialize = serializer.deserialize(partitionEvent.getData(),
TypeReference.createInstance(PlayingCard.class));
assertEquals(playingCard, deserialize);
})
.thenCancel()
.verify(Duration.ofMinutes(2));
} finally {
if (producer != null) {
producer.close();
}
if (consumer != null) {
consumer.close();
}
}
}
/**
* Tests that we auto-register and use cached versions.
*/
@Test
public void autoRegisterSchema() {
final SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder()
.schemaGroup(schemaGroup)
.schemaRegistryClient(builder.buildAsyncClient())
.avroSpecificReader(true)
.autoRegisterSchemas(true)
.buildSerializer();
final Person person = Person.newBuilder()
.setFavouriteColour("Blue")
.setFavouriteNumber(10)
.setName("Joe")
.build();
final MessageContent message = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message);
final MessageContent message2 = serializer.serialize(person, TypeReference.createInstance(MessageContent.class));
assertNotNull(message2);
final Person deserialized = serializer.deserialize(message, TypeReference.createInstance(Person.class));
assertEquals(person, deserialized);
}
} |
I should have done a better job before inspecting this but we should assert on the request that the Authorization header contains "SharedKey" to really verify there are no false positives in this test with authenticating shared key. | public void testPoolCRUD() {
try {
/*
* Creating Pool
* */
ImageReference imgRef = new ImageReference().setPublisher("Canonical").setOffer("UbuntuServer")
.setSku("18.04-LTS").setVersion("latest");
VirtualMachineConfiguration configuration = new VirtualMachineConfiguration(imgRef, nodeAgentSkuId);
BatchPoolCreateOptions poolCreateOptions = new BatchPoolCreateOptions(sharedKeyPoolId, vmSize);
poolCreateOptions.setTargetDedicatedNodes(2)
.setVirtualMachineConfiguration(configuration)
.setTargetNodeCommunicationMode(NodeCommunicationMode.DEFAULT);
batchClient.createPool(poolCreateOptions);
/*
* Getting Pool
*/
Assertions.assertTrue(poolExists(batchClient, sharedKeyPoolId));
BatchPool pool = batchClient.getPool(sharedKeyPoolId);
Assertions.assertEquals(pool.getId(), sharedKeyPoolId);
Assertions.assertEquals(pool.getVirtualMachineConfiguration().getNodeAgentSkuId(), nodeAgentSkuId);
Assertions.assertEquals(vmSize.toLowerCase(), pool.getVmSize().toLowerCase());
/*
* Updating Pool
*/
ArrayList<MetadataItem> updatedMetadata = new ArrayList<MetadataItem>();
updatedMetadata.add(new MetadataItem("foo", "bar"));
BatchPoolUpdateOptions poolUpdateOptions = new BatchPoolUpdateOptions();
poolUpdateOptions.setCertificateReferences(new ArrayList<>())
.setApplicationPackageReferences(new ArrayList<>())
.setMetadata(updatedMetadata);
poolUpdateOptions.setTargetNodeCommunicationMode(NodeCommunicationMode.SIMPLIFIED);
batchClient.updatePool(sharedKeyPoolId, poolUpdateOptions);
pool = batchClient.getPool(sharedKeyPoolId);
Assertions.assertEquals(NodeCommunicationMode.SIMPLIFIED, pool.getTargetNodeCommunicationMode());
List<MetadataItem> metadata = pool.getMetadata();
Assertions.assertTrue(metadata.size() == 1 && metadata.get(0).getName().equals("foo"));
/*
* Update Pool
*/
updatedMetadata.clear();
updatedMetadata.add(new MetadataItem("key1", "value1"));
BatchPoolUpdateOptions poolUpdateOptions2 = new BatchPoolUpdateOptions().setMetadata(updatedMetadata).setTargetNodeCommunicationMode(NodeCommunicationMode.CLASSIC);
Response<Void> updatePoolResponse = batchClient.updatePoolWithResponse(sharedKeyPoolId, BinaryData.fromObject(poolUpdateOptions2), null);
HttpRequest updatePoolRequest = updatePoolResponse.getRequest();
HttpHeader ocpDateHeader = updatePoolRequest.getHeaders().get(HttpHeaderName.fromString("ocp-date"));
Assertions.assertNull(ocpDateHeader);
HttpHeader dateHeader = updatePoolRequest.getHeaders().get(HttpHeaderName.DATE);
Assertions.assertNotNull(dateHeader);
/*
* Get Pool With ocp-Date header
* */
RequestOptions requestOptions = new RequestOptions();
requestOptions.setHeader(HttpHeaderName.fromString("ocp-date"), new DateTimeRfc1123(now()).toString());
Response<BinaryData> poolGetResponse = batchClient.getPoolWithResponse(sharedKeyPoolId, requestOptions);
HttpRequest getPoolRequest = poolGetResponse.getRequest();
ocpDateHeader = getPoolRequest.getHeaders().get(HttpHeaderName.fromString("ocp-date"));
Assertions.assertNotNull(ocpDateHeader);
Assertions.assertTrue(!ocpDateHeader.getValue().isEmpty());
pool = poolGetResponse.getValue().toObject(BatchPool.class);
Assertions.assertEquals(NodeCommunicationMode.CLASSIC, pool.getTargetNodeCommunicationMode());
metadata = pool.getMetadata();
Assertions.assertTrue(metadata.size() == 1 && metadata.get(0).getName().equals("key1"));
}
finally {
/*
* Deleting Pool
* */
batchClient.deletePool(sharedKeyPoolId);
}
} | /* | public void testPoolCRUD() {
try {
/*
* Creating Pool
* */
ImageReference imgRef = new ImageReference().setPublisher("Canonical").setOffer("UbuntuServer")
.setSku("18.04-LTS").setVersion("latest");
VirtualMachineConfiguration configuration = new VirtualMachineConfiguration(imgRef, nodeAgentSkuId);
BatchPoolCreateOptions poolCreateOptions = new BatchPoolCreateOptions(sharedKeyPoolId, vmSize);
poolCreateOptions.setTargetDedicatedNodes(2)
.setVirtualMachineConfiguration(configuration)
.setTargetNodeCommunicationMode(NodeCommunicationMode.DEFAULT);
Response<Void> response = batchClientWithSharedKey.createPoolWithResponse(BinaryData.fromObject(poolCreateOptions), null);
String authorizationValue = response.getRequest().getHeaders().getValue(HttpHeaderName.AUTHORIZATION);
Assertions.assertTrue(authorizationValue.contains("SharedKey"), "Test is not using SharedKey authentication");
/*
* Getting Pool
*/
Assertions.assertTrue(poolExists(batchClientWithSharedKey, sharedKeyPoolId));
BatchPool pool = batchClientWithSharedKey.getPool(sharedKeyPoolId);
Assertions.assertEquals(pool.getId(), sharedKeyPoolId);
Assertions.assertEquals(pool.getVirtualMachineConfiguration().getNodeAgentSkuId(), nodeAgentSkuId);
Assertions.assertEquals(vmSize.toLowerCase(), pool.getVmSize().toLowerCase());
/*
* Replacing Pool Properties
*/
ArrayList<MetadataItem> updatedMetadata = new ArrayList<MetadataItem>();
updatedMetadata.add(new MetadataItem("foo", "bar"));
BatchPoolReplaceOptions poolReplaceOptions = new BatchPoolReplaceOptions(new ArrayList<>(), new ArrayList<>(), updatedMetadata);
poolReplaceOptions.setTargetNodeCommunicationMode(NodeCommunicationMode.SIMPLIFIED);
batchClientWithSharedKey.replacePoolProperties(sharedKeyPoolId, poolReplaceOptions);
pool = batchClientWithSharedKey.getPool(sharedKeyPoolId);
Assertions.assertEquals(NodeCommunicationMode.SIMPLIFIED, pool.getTargetNodeCommunicationMode());
List<MetadataItem> metadata = pool.getMetadata();
Assertions.assertTrue(metadata.size() == 1 && metadata.get(0).getName().equals("foo"));
/*
* Update Pool
*/
updatedMetadata.clear();
updatedMetadata.add(new MetadataItem("key1", "value1"));
BatchPoolUpdateOptions poolUpdateOptions = new BatchPoolUpdateOptions().setMetadata(updatedMetadata).setTargetNodeCommunicationMode(NodeCommunicationMode.CLASSIC);
Response<Void> updatePoolResponse = batchClientWithSharedKey.updatePoolWithResponse(sharedKeyPoolId, BinaryData.fromObject(poolUpdateOptions), null);
HttpRequest updatePoolRequest = updatePoolResponse.getRequest();
HttpHeader ocpDateHeader = updatePoolRequest.getHeaders().get(HttpHeaderName.fromString("ocp-date"));
Assertions.assertNull(ocpDateHeader);
HttpHeader dateHeader = updatePoolRequest.getHeaders().get(HttpHeaderName.DATE);
Assertions.assertNotNull(dateHeader);
authorizationValue = updatePoolRequest.getHeaders().getValue(HttpHeaderName.AUTHORIZATION);
Assertions.assertTrue(authorizationValue.contains("SharedKey"), "Test is not using SharedKey authentication");
/*
* Get Pool With ocp-Date header
* */
RequestOptions requestOptions = new RequestOptions();
requestOptions.setHeader(HttpHeaderName.fromString("ocp-date"), new DateTimeRfc1123(now()).toString());
Response<BinaryData> poolGetResponse = batchClientWithSharedKey.getPoolWithResponse(sharedKeyPoolId, requestOptions);
HttpRequest getPoolRequest = poolGetResponse.getRequest();
ocpDateHeader = getPoolRequest.getHeaders().get(HttpHeaderName.fromString("ocp-date"));
Assertions.assertNotNull(ocpDateHeader);
Assertions.assertTrue(!ocpDateHeader.getValue().isEmpty());
pool = poolGetResponse.getValue().toObject(BatchPool.class);
authorizationValue = getPoolRequest.getHeaders().getValue(HttpHeaderName.AUTHORIZATION);
Assertions.assertTrue(authorizationValue.contains("SharedKey"), "Test is not using SharedKey authentication");
Assertions.assertEquals(NodeCommunicationMode.CLASSIC, pool.getTargetNodeCommunicationMode());
metadata = pool.getMetadata();
Assertions.assertTrue(metadata.size() == 1 && metadata.get(0).getName().equals("key1"));
}
finally {
/*
* Deleting Pool
* */
batchClientWithSharedKey.deletePool(sharedKeyPoolId);
}
} | class SharedKeyTests extends BatchServiceClientTestBase {
private final String sharedKeyPoolId = "SharedKey-testpool";
private final String vmSize = "STANDARD_D1_V2";
private final String nodeAgentSkuId = "batch.node.ubuntu 18.04";
@Override
protected void beforeTest() {
super.beforeTest();
AzureNamedKeyCredential sharedKeyCred = getSharedKeyCredentials();
batchClientBuilder.credential(sharedKeyCred);
}
@Test
} | class SharedKeyTests extends BatchServiceClientTestBase {
private static BatchClient batchClientWithSharedKey;
private final String sharedKeyPoolId = "SharedKey-testpool";
private final String vmSize = "STANDARD_D1_V2";
private final String nodeAgentSkuId = "batch.node.ubuntu 18.04";
@Override
protected void beforeTest() {
super.beforeTest();
AzureNamedKeyCredential sharedKeyCred = getSharedKeyCredentials();
batchClientBuilder.credential(sharedKeyCred);
batchClientWithSharedKey = batchClientBuilder.buildClient();
}
@Test
} |
I would reinitiate a GET request on the JobSchedule after the REPLACE/PUT request and then assert the metadata. | public void canCRUDJobSchedule() throws Exception {
String jobScheduleId = getStringIdWithUserNamePrefix("-JobSchedule-canCRUD");
PoolInformation poolInfo = new PoolInformation();
poolInfo.setPoolId(poolId);
Schedule schedule = new Schedule().setDoNotRunUntil(now()).setDoNotRunAfter(now().plusHours(5)).setStartWindow(Duration.ofDays(5));
JobSpecification spec = new JobSpecification(poolInfo).setPriority(100);
batchClient.createJobSchedule(new BatchJobScheduleCreateOptions(jobScheduleId, schedule, spec));
try {
Assertions.assertTrue(batchClient.jobScheduleExists(jobScheduleId));
BatchJobSchedule jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assertions.assertNotNull(jobSchedule);
Assertions.assertEquals(jobScheduleId, jobSchedule.getId());
Assertions.assertEquals((Integer) 100, jobSchedule.getJobSpecification().getPriority());
if(getTestMode() == TestMode.RECORD) {
Assertions.assertTrue(jobSchedule.getSchedule().getDoNotRunAfter().compareTo(now()) > 0);
}
RequestOptions listOptions = new RequestOptions();
listOptions.addQueryParam("$filter", String.format("id eq '%s'", jobScheduleId));
PagedIterable<BatchJobSchedule> jobSchedules = batchClient.listJobSchedules(listOptions).mapPage(bodyItemValue -> bodyItemValue.toObject(BatchJobSchedule.class));
Assert.assertNotNull(jobSchedules);
boolean found = false;
for (BatchJobSchedule batchJobSchedule: jobSchedules) {
if (batchJobSchedule.getId().equals(jobScheduleId)) {
found = true;
}
}
Assert.assertTrue(found);
List<MetadataItem> metadataList = new ArrayList<>();
metadataList.add(new MetadataItem("name1", "value1"));
metadataList.add(new MetadataItem("name2", "value2"));
jobSchedule.setMetadata(metadataList);
batchClient.replaceJobSchedule(jobScheduleId, jobSchedule);
List<MetadataItem> retrievedMetadata = jobSchedule.getMetadata();
Assertions.assertNotNull(retrievedMetadata, "Metadata in jobSchedule should not be null");
Assertions.assertTrue(retrievedMetadata.containsAll(metadataList), "jobSchedule metadata does not match expected");
Assertions.assertTrue(retrievedMetadata.size() > 0 && retrievedMetadata.get(0).getValue().equals("value1"), "jobSchedule metadata does not contain the specific item at index 0 with value 'value1'");
LinkedList<MetadataItem> metadata = new LinkedList<MetadataItem>();
metadata.add((new MetadataItem("key1", "value1")));
BatchJobScheduleUpdateOptions jobScheduleUpdateOptions = new BatchJobScheduleUpdateOptions();
jobScheduleUpdateOptions.setMetadata(metadata);
batchClient.updateJobSchedule(jobScheduleId, jobScheduleUpdateOptions);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue(jobSchedule.getMetadata().size() == 1);
Assert.assertTrue(jobSchedule.getMetadata().get(0).getName().equals("key1"));
Assert.assertEquals((Integer) 100, jobSchedule.getJobSpecification().getPriority());
batchClient.deleteJobSchedule(jobScheduleId);
try {
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue("Shouldn't be here, the jobschedule should be deleted", true);
} catch (HttpResponseException err) {
if (err.getResponse().getStatusCode() != 404) {
throw err;
}
}
Thread.sleep(1* 1000);
} finally {
try {
batchClient.deleteJobSchedule(jobScheduleId);
} catch (Exception e) {
}
}
} | List<MetadataItem> retrievedMetadata = jobSchedule.getMetadata(); | public void canCRUDJobSchedule() throws Exception {
String jobScheduleId = getStringIdWithUserNamePrefix("-JobSchedule-canCRUD");
PoolInformation poolInfo = new PoolInformation();
poolInfo.setPoolId(poolId);
Schedule schedule = new Schedule().setDoNotRunUntil(now()).setDoNotRunAfter(now().plusHours(5)).setStartWindow(Duration.ofDays(5));
JobSpecification spec = new JobSpecification(poolInfo).setPriority(100);
batchClient.createJobSchedule(new BatchJobScheduleCreateOptions(jobScheduleId, schedule, spec));
try {
Assertions.assertTrue(batchClient.jobScheduleExists(jobScheduleId));
BatchJobSchedule jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assertions.assertNotNull(jobSchedule);
Assertions.assertEquals(jobScheduleId, jobSchedule.getId());
Assertions.assertEquals((Integer) 100, jobSchedule.getJobSpecification().getPriority());
if(getTestMode() == TestMode.RECORD) {
Assertions.assertTrue(jobSchedule.getSchedule().getDoNotRunAfter().compareTo(now()) > 0);
}
RequestOptions listOptions = new RequestOptions();
listOptions.addQueryParam("$filter", String.format("id eq '%s'", jobScheduleId));
PagedIterable<BatchJobSchedule> jobSchedules = batchClient.listJobSchedules(listOptions).mapPage(bodyItemValue -> bodyItemValue.toObject(BatchJobSchedule.class));
Assert.assertNotNull(jobSchedules);
boolean found = false;
for (BatchJobSchedule batchJobSchedule: jobSchedules) {
if (batchJobSchedule.getId().equals(jobScheduleId)) {
found = true;
}
}
Assert.assertTrue(found);
List<MetadataItem> metadataList = new ArrayList<>();
metadataList.add(new MetadataItem("name1", "value1"));
metadataList.add(new MetadataItem("name2", "value2"));
jobSchedule.setMetadata(metadataList);
batchClient.replaceJobSchedule(jobScheduleId, jobSchedule);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue(jobSchedule.getMetadata().size() == 2);
Assert.assertTrue(jobSchedule.getMetadata().get(1).getValue().equals("value2"));
LinkedList<MetadataItem> metadata = new LinkedList<MetadataItem>();
metadata.add((new MetadataItem("key1", "value1")));
BatchJobScheduleUpdateOptions jobScheduleUpdateOptions = new BatchJobScheduleUpdateOptions();
jobScheduleUpdateOptions.setMetadata(metadata);
batchClient.updateJobSchedule(jobScheduleId, jobScheduleUpdateOptions);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue(jobSchedule.getMetadata().size() == 1);
Assert.assertTrue(jobSchedule.getMetadata().get(0).getName().equals("key1"));
Assert.assertEquals((Integer) 100, jobSchedule.getJobSpecification().getPriority());
batchClient.deleteJobSchedule(jobScheduleId);
try {
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue("Shouldn't be here, the jobschedule should be deleted", true);
} catch (HttpResponseException err) {
if (err.getResponse().getStatusCode() != 404) {
throw err;
}
}
Thread.sleep(1* 1000);
} finally {
try {
batchClient.deleteJobSchedule(jobScheduleId);
} catch (Exception e) {
}
}
} | class JobScheduleTests extends BatchServiceClientTestBase {
static BatchPool livePool;
static String poolId;
@Override
protected void beforeTest() {
super.beforeTest();
poolId = getStringIdWithUserNamePrefix("-testpool");
if(getTestMode() == TestMode.RECORD) {
try {
livePool = createIfNotExistIaaSPool(poolId);
} catch (Exception e) {
e.printStackTrace();
}
Assertions.assertNotNull(livePool);
}
}
@Test
@Test
public void canUpdateJobScheduleState() throws Exception {
String jobScheduleId = getStringIdWithUserNamePrefix("-JobSchedule-updateJobScheduleState");
PoolInformation poolInfo = new PoolInformation();
poolInfo.setPoolId(poolId);
JobSpecification spec = new JobSpecification(poolInfo).setPriority(100);
Schedule schedule = new Schedule().setDoNotRunUntil(now()).setDoNotRunAfter(now().plusHours(5)).setStartWindow(Duration.ofDays(5));
batchClient.createJobSchedule(new BatchJobScheduleCreateOptions(jobScheduleId, schedule, spec));
try {
BatchJobSchedule jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.ACTIVE, jobSchedule.getState());
batchClient.disableJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.DISABLED, jobSchedule.getState());
batchClient.enableJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.ACTIVE, jobSchedule.getState());
batchClient.terminateJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue(jobSchedule.getState() == JobScheduleState.TERMINATING || jobSchedule.getState() == JobScheduleState.COMPLETED);
Thread.sleep(2 * 1000);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.COMPLETED, jobSchedule.getState());
batchClient.deleteJobSchedule(jobScheduleId);
try {
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue("Shouldn't be here, the jobschedule should be deleted", true);
} catch (HttpResponseException err) {
if (err.getResponse().getStatusCode() != 404) {
throw err;
}
}
}
finally {
try {
batchClient.deleteJobSchedule(jobScheduleId);
}
catch (Exception e) {
}
}
}
} | class JobScheduleTests extends BatchServiceClientTestBase {
static BatchPool livePool;
static String poolId;
@Override
protected void beforeTest() {
super.beforeTest();
poolId = getStringIdWithUserNamePrefix("-testpool");
if(getTestMode() == TestMode.RECORD) {
try {
livePool = createIfNotExistIaaSPool(poolId);
} catch (Exception e) {
e.printStackTrace();
}
Assertions.assertNotNull(livePool);
}
}
@Test
@Test
public void canUpdateJobScheduleState() throws Exception {
String jobScheduleId = getStringIdWithUserNamePrefix("-JobSchedule-updateJobScheduleState");
PoolInformation poolInfo = new PoolInformation();
poolInfo.setPoolId(poolId);
JobSpecification spec = new JobSpecification(poolInfo).setPriority(100);
Schedule schedule = new Schedule().setDoNotRunUntil(now()).setDoNotRunAfter(now().plusHours(5)).setStartWindow(Duration.ofDays(5));
batchClient.createJobSchedule(new BatchJobScheduleCreateOptions(jobScheduleId, schedule, spec));
try {
BatchJobSchedule jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.ACTIVE, jobSchedule.getState());
batchClient.disableJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.DISABLED, jobSchedule.getState());
batchClient.enableJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.ACTIVE, jobSchedule.getState());
batchClient.terminateJobSchedule(jobScheduleId);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue(jobSchedule.getState() == JobScheduleState.TERMINATING || jobSchedule.getState() == JobScheduleState.COMPLETED);
Thread.sleep(2 * 1000);
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertEquals(JobScheduleState.COMPLETED, jobSchedule.getState());
batchClient.deleteJobSchedule(jobScheduleId);
try {
jobSchedule = batchClient.getJobSchedule(jobScheduleId);
Assert.assertTrue("Shouldn't be here, the jobschedule should be deleted", true);
} catch (HttpResponseException err) {
if (err.getResponse().getStatusCode() != 404) {
throw err;
}
}
}
finally {
try {
batchClient.deleteJobSchedule(jobScheduleId);
}
catch (Exception e) {
}
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.