id
stringlengths 29
30
| content
stringlengths 152
2.6k
|
|---|---|
codereview_new_java_data_1022
|
public void can_do_query_params_and_headers() {
@Test
public void post_with_json_serialization_works() {
JavalinTest.test((server, client) -> {
- server.post("/hello", ctx -> ctx.result(ctx.<MyJavaClass>bodyAsClass(MyJavaClass.class).field1));
Response response = client.post("/hello", new MyJavaClass("v1", "v2"));
assertThat(response.body().string()).isEqualTo("v1");
});
Why is this required?
public void can_do_query_params_and_headers() {
@Test
public void post_with_json_serialization_works() {
JavalinTest.test((server, client) -> {
+ server.post("/hello", ctx -> ctx.result(ctx.bodyAsClass(MyJavaClass.class).field1));
Response response = client.post("/hello", new MyJavaClass("v1", "v2"));
assertThat(response.body().string()).isEqualTo("v1");
});
|
codereview_new_java_data_1023
|
public static void main(String[] args) {
JsonMapper gsonMapper = new JsonMapper() {
@NotNull
@Override
- public String toJsonString(@NotNull Object obj, Type type) {
- return gson.toJson(obj);
}
@NotNull
What is the purpose of this?
public static void main(String[] args) {
JsonMapper gsonMapper = new JsonMapper() {
@NotNull
@Override
+ public String toJsonString(@NotNull Object obj, @NotNull Type type) {
+ return gson.toJson(obj, type);
}
@NotNull
|
codereview_new_java_data_1028
|
import io.javalin.event.HandlerMetaInfo;
import io.javalin.event.JavalinEvent;
import io.javalin.event.WsHandlerMetaInfo;
-import io.javalin.http.*;
import io.javalin.http.sse.SseClient;
import io.javalin.http.sse.SseHandler;
import io.javalin.jetty.JavalinJettyServlet;
You should configure IntelliJ to not group into star imports.
import io.javalin.event.HandlerMetaInfo;
import io.javalin.event.JavalinEvent;
import io.javalin.event.WsHandlerMetaInfo;
+import io.javalin.http.Context;
+import io.javalin.http.ExceptionHandler;
+import io.javalin.http.Handler;
+import io.javalin.http.HandlerType;
+import io.javalin.http.HttpCode;
+import io.javalin.http.JavalinServlet;
import io.javalin.http.sse.SseClient;
import io.javalin.http.sse.SseHandler;
import io.javalin.jetty.JavalinJettyServlet;
|
codereview_new_java_data_1285
|
-package com.example.fromjava;
-
-class ByteBuffer
-{
- ByteBuffer() {}
- public ByteBuffer get(byte[] dst) { return ByteBuffer(); }
-}
Since you need type resolution anyway, wouldn't it make sense to use the JDK-provided class and method?
|
codereview_new_java_data_1286
|
-package com.example.fromjava;
-
-class ByteBuffer
-{
- ByteBuffer() {}
- public ByteBuffer get(byte[] dst) { return ByteBuffer(); }
-}
```suggestion
public ByteBuffer get(byte[] dst) { return ByteBuffer(); }
```
|
codereview_new_java_data_1617
|
private void filter(List<Integer> toShow, List<Integer> toHide, boolean displayS
capability != null && capability.getFilesSharingResharing().isFalse();
OCSpace space = mComponentsGetter.getStorageManager().getSpace(mFiles.get(0).getSpaceId(), mAccount.name);
- boolean notAllowSharing = space != null && !space.isPersonal();
if ((!shareViaLinkAllowed && !shareWithUsersAllowed) || !isSingleSelection() ||
- notAllowResharing || onlyAvailableOffline || notAllowSharing) {
toHide.add(R.id.action_share_file);
} else {
toShow.add(R.id.action_share_file);
```suggestion
boolean notPersonalSpace = space != null && !space.isPersonal();
```
private void filter(List<Integer> toShow, List<Integer> toHide, boolean displayS
capability != null && capability.getFilesSharingResharing().isFalse();
OCSpace space = mComponentsGetter.getStorageManager().getSpace(mFiles.get(0).getSpaceId(), mAccount.name);
+ boolean notPersonalSpace = space != null && !space.isPersonal();
if ((!shareViaLinkAllowed && !shareWithUsersAllowed) || !isSingleSelection() ||
+ notAllowResharing || onlyAvailableOffline || notPersonalSpace) {
toHide.add(R.id.action_share_file);
} else {
toShow.add(R.id.action_share_file);
|
codereview_new_java_data_1622
|
public void afterTextChanged(Editable editable) {
fileToUpload.add(filePath);
@NotNull Lazy<TransfersViewModel> transfersViewModelLazy = inject(TransfersViewModel.class);
TransfersViewModel transfersViewModel = transfersViewModelLazy.getValue();
- transfersViewModel.uploadFilesFromSystem(getAccount().name, fileToUpload, mUploadPath, null);
finish();
}
inputLayout.setErrorEnabled(error != null);
This one will fail when personal becomes an actual space in https://github.com/owncloud/android/issues/3919
To take care once when the PR is merged
public void afterTextChanged(Editable editable) {
fileToUpload.add(filePath);
@NotNull Lazy<TransfersViewModel> transfersViewModelLazy = inject(TransfersViewModel.class);
TransfersViewModel transfersViewModel = transfersViewModelLazy.getValue();
+ transfersViewModel.uploadFilesFromSystem(getAccount().name, fileToUpload, mUploadPath, mPersonalSpaceId);
finish();
}
inputLayout.setErrorEnabled(error != null);
|
codereview_new_java_data_1623
|
protected ResultCode doInBackground(Object[] params) {
account.name,
filesToUpload,
uploadPath,
- null
);
uploadFilesFromSystemUseCase.execute(useCaseParams);
fullTempPath = null;
This one will fail when personal becomes an actual space in https://github.com/owncloud/android/issues/3919
To take care once when the PR is merged
protected ResultCode doInBackground(Object[] params) {
account.name,
filesToUpload,
uploadPath,
+ spaceId
);
uploadFilesFromSystemUseCase.execute(useCaseParams);
fullTempPath = null;
|
codereview_new_java_data_1631
|
private boolean anyFileSynchronizingLookingIntoWorkers() {
private boolean anyFileSynchronizingLookingIntoFilesSync() {
boolean synchronizing = false;
- if (!mFiles.isEmpty()) {
for (int i = 0; !synchronizing && i < mFilesSync.size(); i++) {
- for (int j = 0; !synchronizing && j < mFiles.size(); j++) {
- if (mFilesSync.get(i).getFileId() == mFiles.get(j).getId()) {
- synchronizing = mFilesSync.get(i).isSynchronizing();
- }
- }
}
}
return synchronizing;
AFAIK, if we check the mFilesSync, we have the file info too. So, I would say we don't need to compare both lists. We need to check if there is any file syncing on the mFilesSync, right? And we don't need to check that for every single file, if we find 1 that is syncing, we can return true, no need to keep checking more files, it could reduce performance when there are a lot of files, right?
Let me know what do u think 🍻
private boolean anyFileSynchronizingLookingIntoWorkers() {
private boolean anyFileSynchronizingLookingIntoFilesSync() {
boolean synchronizing = false;
+ if (!mFilesSync.isEmpty()) {
for (int i = 0; !synchronizing && i < mFilesSync.size(); i++) {
+ synchronizing = mFilesSync.get(i).isSynchronizing();
}
}
return synchronizing;
|
codereview_new_java_data_1643
|
public void sendDownloadedFile(OCFile ocFile) {
}
}
- public void sendDownloadedFile(List<OCFile> ocFiles) {
if (!ocFiles.isEmpty()) {
Intent sendIntent = makeActionSendIntent(ocFiles);
// Show dialog, without the own app
This one could work
```suggestion
public void sendDownloadedFiles(List<OCFile> ocFiles) {
```
public void sendDownloadedFile(OCFile ocFile) {
}
}
+ public void sendDownloadedFiles(List<OCFile> ocFiles) {
if (!ocFiles.isEmpty()) {
Intent sendIntent = makeActionSendIntent(ocFiles);
// Show dialog, without the own app
|
codereview_new_java_data_1644
|
private boolean onFileActionChosen(int menuId) {
if (!filesAreDown(checkedFiles)) { // Download the file
((FileDisplayActivity) mContainerActivity).startDownloadForSending(checkedFiles.get(0));
} else {
- mContainerActivity.getFileOperationsHelper().sendDownloadedFile(checkedFiles);
}
return true;
}
@abelgardep https://github.com/owncloud/android/pull/3638/files/4f8f192987c4f39e2003928e7e1566641d00b7be#r858889730
Now this is here. Before, it was in a switch of actions on only one file. Now, it is in the switch of actions on a batch of files
private boolean onFileActionChosen(int menuId) {
if (!filesAreDown(checkedFiles)) { // Download the file
((FileDisplayActivity) mContainerActivity).startDownloadForSending(checkedFiles.get(0));
} else {
+ mContainerActivity.getFileOperationsHelper().sendDownloadedFiles(checkedFiles);
}
return true;
}
|
codereview_new_java_data_1717
|
public interface ApolloDisposable {
void addListener(Listener listener);
- void removeCancellationListener(Listener listener);
interface Listener {
- void onCancelled();
}
}
Maybe `onDisposed` (or `onDispose`) would make more sense?
public interface ApolloDisposable {
void addListener(Listener listener);
+ void removeListener(Listener listener);
interface Listener {
+ void onDisposed();
}
}
|
codereview_new_java_data_1718
|
public interface ApolloCallback<D extends Operation.Data> {
/**
- * Gets called when GraphQL response is received and parsed successfully. D
*
* @param response the GraphQL response
*/
```suggestion
* Gets called when GraphQL response is received and parsed successfully.
```
public interface ApolloCallback<D extends Operation.Data> {
/**
+ * Gets called when GraphQL response is received and parsed successfully.
*
* @param response the GraphQL response
*/
|
codereview_new_java_data_1719
|
private NetworkInterceptor(Call.Factory callFactory, String serverUrl) {
@Nullable @Override public MediaType contentType() {
return MediaType.parse(httpRequest.getBody().getContentType());
}
@Override public void writeTo(@NotNull BufferedSink bufferedSink) throws IOException {
httpRequest.getBody().writeTo(bufferedSink);
```suggestion
@Override public long contentLength() {
return httpRequest.getBody().getContentLength();
}
```
private NetworkInterceptor(Call.Factory callFactory, String serverUrl) {
@Nullable @Override public MediaType contentType() {
return MediaType.parse(httpRequest.getBody().getContentType());
}
+ @Override public long contentLength() {
+ return httpRequest.getBody().getContentLength();
+ }
@Override public void writeTo(@NotNull BufferedSink bufferedSink) throws IOException {
httpRequest.getBody().writeTo(bufferedSink);
|
codereview_new_java_data_3298
|
private void checkMemoryMB() {
/** Returns a {@link Sorter} configured with the given {@link Options}. */
public static ExternalSorter create(Options options) {
checkArgument(
- options.getSorterType() == Options.SorterType.NATIVE, "Only Native sorter is supported");
return NativeExternalSorter.create(options);
}
```suggestion
options.getSorterType() == Options.SorterType.NATIVE, "NativeExternalSorter is the only supported external sorter");
```
private void checkMemoryMB() {
/** Returns a {@link Sorter} configured with the given {@link Options}. */
public static ExternalSorter create(Options options) {
checkArgument(
+ options.getSorterType() == Options.SorterType.NATIVE, "NativeExternalSorter is the only supported external sorter");
return NativeExternalSorter.create(options);
}
|
codereview_new_java_data_3305
|
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
-import org.apache.beam.sdk.values.KV;
import org.joda.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
unused import ?
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
import org.joda.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
|
codereview_new_java_data_3318
|
private void advance() {
.mapToObj(i -> new ArrayList<>())
.collect(Collectors.toList());
- // When predicates are applied, a sources containing a key may have no values after filtering.
// Sources containing minKey are by default known to be NONEMPTY. Once all sources are
// consumed, if all are known to be empty, the key group can be dropped.
List<KeyGroupOutputSize> valueOutputSizes =
```suggestion
// When predicates are applied, a source containing a key may have no values after filtering.
```
private void advance() {
.mapToObj(i -> new ArrayList<>())
.collect(Collectors.toList());
+ // When a predicate is applied, a source containing a key may have no values after filtering.
// Sources containing minKey are by default known to be NONEMPTY. Once all sources are
// consumed, if all are known to be empty, the key group can be dropped.
List<KeyGroupOutputSize> valueOutputSizes =
|
codereview_new_java_data_3320
|
int leastNumBuckets() {
private <V> Map<ResourceId, BucketMetadata<?, ?, V>> fetchMetadata(List<String> directories) {
final int total = directories.size();
- final Map<ResourceId, BucketMetadata<?, ?, V>> md = new ConcurrentHashMap<>();
int start = 0;
while (start < total) {
directories.stream()
.skip(start)
.limit(batchSize)
.map(dir -> FileSystems.matchNewResource(dir, true))
.parallel()
- .forEach(dir -> md.put(dir, BucketMetadata.get(dir)));
start += batchSize;
}
- return md;
}
private <V> SourceMetadata<V> getSourceMetadata(
rename to something more descriptive?
int leastNumBuckets() {
private <V> Map<ResourceId, BucketMetadata<?, ?, V>> fetchMetadata(List<String> directories) {
final int total = directories.size();
+ final Map<ResourceId, BucketMetadata<?, ?, V>> metadata = new ConcurrentHashMap<>();
int start = 0;
while (start < total) {
directories.stream()
.skip(start)
.limit(batchSize)
.map(dir -> FileSystems.matchNewResource(dir, true))
.parallel()
+ .forEach(dir -> metadata.put(dir, BucketMetadata.get(dir)));
start += batchSize;
}
+ return metadata;
}
private <V> SourceMetadata<V> getSourceMetadata(
|
codereview_new_java_data_3468
|
public abstract class PresentationCompiler {
* Returns token informations from presentation compiler.
*
*/
- public abstract CompletableFuture<List<Integer>> semanticTokens(
- VirtualFileParams params,
- List<String> capableTypes,
- List<String> capableModifiers);
/**
* Returns code completions for the given source position.
```suggestion
public abstract CompletableFuture<List<Integer>> semanticTokens(VirtualFileParams params);
```
as I said above, we don't need to send types and modifiers.
public abstract class PresentationCompiler {
* Returns token informations from presentation compiler.
*
*/
+ public abstract CompletableFuture<List<Integer>> semanticTokens(VirtualFileParams params);
/**
* Returns code completions for the given source position.
|
codereview_new_java_data_3582
|
public static void runMain(String[] args) throws Exception {
LoadResult loadResult = load();
if (loadResult.millMainMethod.isPresent()) {
if (!MillEnv.millJvmOptsAlreadyApplied() && MillEnv.millJvmOptsFile().exists()) {
-// System.err.println("Warning: Settings from file `" + propOptsFile + "` are currently ignored.");
System.err.println("Launching Mill as sub-process ...");
int exitVal = launchMillAsSubProcess(args);
System.exit(exitVal);
Is this commented line here on purpose?
public static void runMain(String[] args) throws Exception {
LoadResult loadResult = load();
if (loadResult.millMainMethod.isPresent()) {
if (!MillEnv.millJvmOptsAlreadyApplied() && MillEnv.millJvmOptsFile().exists()) {
System.err.println("Launching Mill as sub-process ...");
int exitVal = launchMillAsSubProcess(args);
System.exit(exitVal);
|
codereview_new_java_data_3663
|
public static void execute(String[] args) {
System.out.println(prop);
}
}
- } else if (key.equals("-args")) {
// get all properties
StringBuffer prop = new StringBuffer("");
for (Map.Entry<Object, Object> entry : config.entrySet()) {
字符串应该写在equals前面
public static void execute(String[] args) {
System.out.println(prop);
}
}
+ } else if ("-args".equals(key)) {
// get all properties
StringBuffer prop = new StringBuffer("");
for (Map.Entry<Object, Object> entry : config.entrySet()) {
|
codereview_new_java_data_3804
|
-package test.flix;
-
-public interface TestInterface {
-}
We have had some trouble with tests depending on Java sources.
Perhaps you could simply use:
- java.lang.Object
- java.io.Serializable
- java.lang.Cloneable
|
codereview_new_java_data_4379
|
public boolean pushAggregation(Aggregation aggregation) {
expressions.add((BoundAggregate<?, ?>) bound);
} else {
LOG.info(
- "Skipping aggregate pushdown: AggregateFunc {} can't be converted to iceberg Expression",
aggregateFunc);
return false;
}
nit: `to iceberg Expression` -> `to Iceberg expression` or simply `to Iceberg`.
public boolean pushAggregation(Aggregation aggregation) {
expressions.add((BoundAggregate<?, ?>) bound);
} else {
LOG.info(
+ "Skipping aggregate pushdown: AggregateFunc {} can't be converted to iceberg expression",
aggregateFunc);
return false;
}
|
codereview_new_java_data_4380
|
public void testSanitizeStringFallback() {
"2022-04-29T23:70:51-07:00",
"2022-04-29T23:49:51.123456+100:00")) {
String sanitizedFilter = ExpressionUtil.toSanitizedString(Expressions.equal("test", filter));
- Assert.assertTrue(
- "Invalid date time string should use default sanitize method",
- filterPattern.matcher(sanitizedFilter).matches());
}
}
rather than using `assertTrue` I think `Assertions.assertThat(filterPattern.matcher(sanitizedFilter)).matches();` would be better, because it will provide additional details when that assertion ever fails: `Expecting java.util.regex.Matcher[pattern=^test = \(hash-[0-9a-fA-F]{8}\)$ region=0,22 lastmatch=] to match.`
public void testSanitizeStringFallback() {
"2022-04-29T23:70:51-07:00",
"2022-04-29T23:49:51.123456+100:00")) {
String sanitizedFilter = ExpressionUtil.toSanitizedString(Expressions.equal("test", filter));
+ Assertions.assertThat(filterPattern.matcher(sanitizedFilter)).matches();
}
}
|
codereview_new_java_data_4381
|
public boolean canDeleteWhere(Filter[] filters) {
}
private boolean selectsPartitions(Expression expr) {
- return table().specs().values().stream()
- .allMatch(spec -> ExpressionUtil.selectsPartitions(expr, spec, isCaseSensitive()));
}
// a metadata delete is possible iff matching files can be deleted entirely
It is a similar trick to what we do in `SparkScanBuilder`.
public boolean canDeleteWhere(Filter[] filters) {
}
private boolean selectsPartitions(Expression expr) {
+ return ExpressionUtil.selectsPartitions(expr, table(), isCaseSensitive());
}
// a metadata delete is possible iff matching files can be deleted entirely
|
codereview_new_java_data_4384
|
private DataFile buildDataFileFromAction(Action action, Table table) {
InputFile file = deltaLakeFileIO.newInputFile(fullFilePath);
if (!file.exists()) {
throw new NotFoundException(
- "The file %s does not exist in the Delta Lake table at %s",
fullFilePath, deltaTableLocation);
}
I think checking file existence explicitly here can make the code logic more clear and have more reasonable error message
private DataFile buildDataFileFromAction(Action action, Table table) {
InputFile file = deltaLakeFileIO.newInputFile(fullFilePath);
if (!file.exists()) {
throw new NotFoundException(
+ "File %s is referenced in the metadata of Delta Lake table at %s, but cannot be found in the storage",
fullFilePath, deltaTableLocation);
}
|
codereview_new_java_data_4387
|
public class RESTSessionCatalog extends BaseSessionCatalog
implements Configurable<Configuration>, Closeable {
private static final Logger LOG = LoggerFactory.getLogger(RESTSessionCatalog.class);
private static final String REST_METRICS_REPORTING_ENABLED = "rest-metrics-reporting-enabled";
- private static final String REST_SNAPSHOT_LOADING_MODE = "rest-snapshot-loading-mode";
private static final List<String> TOKEN_PREFERENCE_ORDER =
ImmutableList.of(
OAuth2Properties.ID_TOKEN_TYPE,
I think this should just be `snapshot-loading-mode`.
The reason why we added the `rest-` prefix for metrics reporting is that we report metrics in multiple ways, one of which is to always report through the REST interface. Because you can set the metrics reporter and choose whether to send REST metrics separately, we needed to distinguish the option. But that's not a problem for snapshot loading.
public class RESTSessionCatalog extends BaseSessionCatalog
implements Configurable<Configuration>, Closeable {
private static final Logger LOG = LoggerFactory.getLogger(RESTSessionCatalog.class);
private static final String REST_METRICS_REPORTING_ENABLED = "rest-metrics-reporting-enabled";
+ private static final String REST_SNAPSHOT_LOADING_MODE = "snapshot-loading-mode";
private static final List<String> TOKEN_PREFERENCE_ORDER =
ImmutableList.of(
OAuth2Properties.ID_TOKEN_TYPE,
|
codereview_new_java_data_4389
|
public void testDropBranchDoesNotExist() {
}
@Test
- public void testDropBranchFailesForTag() throws NoSuchTableException {
String tagName = "b1";
Table table = insertRows();
table.manageSnapshots().createTag(tagName, table.currentSnapshot().snapshotId()).commit();
```suggestion
public void testDropBranchFailsForTag() throws NoSuchTableException {
```
public void testDropBranchDoesNotExist() {
}
@Test
+ public void testDropBranchFailsForTag() throws NoSuchTableException {
String tagName = "b1";
Table table = insertRows();
table.manageSnapshots().createTag(tagName, table.currentSnapshot().snapshotId()).commit();
|
codereview_new_java_data_4390
|
public static Map<Integer, String> indexQuotedNameById(Schema schema) {
}
/**
- * convert partition spec to Spark type
*
- * @param spec
- * @return
*/
public static StructType convert(PartitionSpec spec) {
return convert(new Schema(spec.partitionType().asNestedType().asStructType().fields()));
nit:
```java
/**
* Convert a {@link PartitionSpec} to a {@link DataType Spark type}.
*
* @param spec a iceberg PartitionSpec
* @return the equivalent Spark type
*/
```
public static Map<Integer, String> indexQuotedNameById(Schema schema) {
}
/**
+ * Convert a {@link PartitionSpec} to a {@link DataType Spark type}.
*
+ * @param spec iceberg PartitionSpec
+ * @return {@link StructType}
*/
public static StructType convert(PartitionSpec spec) {
return convert(new Schema(spec.partitionType().asNestedType().asStructType().fields()));
|
codereview_new_java_data_4391
|
public static Map<Integer, String> indexQuotedNameById(Schema schema) {
Function<String, String> quotingFunc = name -> String.format("`%s`", name.replace("`", "``"));
return TypeUtil.indexQuotedNameById(schema.asStruct(), quotingFunc);
}
-
- /**
- * Convert a {@link PartitionSpec} to a {@link DataType Spark type}.
- *
- * @param spec iceberg PartitionSpec
- * @return {@link StructType}
- * @throws IllegalArgumentException if the type cannot be converted
- */
- public static StructType convert(PartitionSpec spec) {
- return convert(new Schema(spec.partitionType().asNestedType().asStructType().fields()));
- }
}
I think the general project direction has been to avoid adding public method unless we need it in many places. This is converting from a partition spec type to spark type, and should be fairly limited, so would suggest inline.
I personally think , if anything, adding public method convert(StructType) to spark StructType would be more useful, and will cover this case here.
Would like to see what @aokolnychyi @RussellSpitzer think as well.
Also, is it more convoluted than needed? Could it be?
```
convert(new Schema(partType.partitionType().fields())))
```
public static Map<Integer, String> indexQuotedNameById(Schema schema) {
Function<String, String> quotingFunc = name -> String.format("`%s`", name.replace("`", "``"));
return TypeUtil.indexQuotedNameById(schema.asStruct(), quotingFunc);
}
}
|
codereview_new_java_data_4392
|
public void filter(Filter[] filters) {
.collect(Collectors.toList());
LOG.info(
- "{}/{} tasks for table {} matched runtime file filter",
filteredTasks.size(),
tasks().size(),
- table().name());
resetTasks(filteredTasks);
}
should we include concerned table name as well in log
public void filter(Filter[] filters) {
.collect(Collectors.toList());
LOG.info(
+ "{} of {} task(s) for table {} matched runtime file filter with {} location(s)",
filteredTasks.size(),
tasks().size(),
+ table().name(),
+ fileLocations.size());
resetTasks(filteredTasks);
}
|
codereview_new_java_data_4393
|
private HttpClientConfigurations loadHttpClientConfigurations(
return httpClientConfigurations;
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException(
- String.format(
- "Cannot initialize HttpClientConfigurations Implementation %s: %s",
- impl, e.getMessage()),
- e);
} catch (ClassCastException e) {
throw new IllegalArgumentException(
String.format(
- "Cannot initialize HttpClientConfigurations, %s does not implement HttpClientConfigurations: %s",
- impl, e.getMessage()),
e);
}
}
no need to add `e.getMessage` since `e` is already included in the cause.
private HttpClientConfigurations loadHttpClientConfigurations(
return httpClientConfigurations;
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException(
+ String.format("Cannot initialize HttpClientConfigurations Implementation %s", impl), e);
} catch (ClassCastException e) {
throw new IllegalArgumentException(
String.format(
+ "Cannot initialize HttpClientConfigurations, %s does not implement HttpClientConfigurations",
+ impl),
e);
}
}
|
codereview_new_java_data_4394
|
private HttpClientConfigurations loadHttpClientConfigurations(
return httpClientConfigurations;
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException(
- String.format(
- "Cannot initialize HttpClientConfigurations Implementation %s: %s",
- impl, e.getMessage()),
- e);
} catch (ClassCastException e) {
throw new IllegalArgumentException(
String.format(
- "Cannot initialize HttpClientConfigurations, %s does not implement HttpClientConfigurations: %s",
- impl, e.getMessage()),
e);
}
}
no need to add `e.getMessage` since `e` is already included in the cause.
private HttpClientConfigurations loadHttpClientConfigurations(
return httpClientConfigurations;
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException(
+ String.format("Cannot initialize HttpClientConfigurations Implementation %s", impl), e);
} catch (ClassCastException e) {
throw new IllegalArgumentException(
String.format(
+ "Cannot initialize HttpClientConfigurations, %s does not implement HttpClientConfigurations",
+ impl),
e);
}
}
|
codereview_new_java_data_4396
|
private Pair<Table, Long> load(Identifier ident) throws NoSuchTableException {
Preconditions.checkArgument(
tagSnapshot != null, "Cannot find snapshot associated with tag name: %s", tag);
return Pair.of(table, tagSnapshot.snapshotId());
}
-
- return Pair.of(table, null);
}
private Pair<String, List<String>> parseIdent(Identifier ident) {
nit: I think we can avoid this line change by putting it in the else clause
private Pair<Table, Long> load(Identifier ident) throws NoSuchTableException {
Preconditions.checkArgument(
tagSnapshot != null, "Cannot find snapshot associated with tag name: %s", tag);
return Pair.of(table, tagSnapshot.snapshotId());
+ } else {
+ return Pair.of(table, null);
}
}
private Pair<String, List<String>> parseIdent(Identifier ident) {
|
codereview_new_java_data_4397
|
private Table loadFromPathIdentifier(PathIdentifier ident) {
Preconditions.checkArgument(
tagSnapshot != null, "Cannot find snapshot associated with tag name: %s", tag);
return new SparkTable(table, tagSnapshot.snapshotId(), !cacheEnabled);
}
-
- return new SparkTable(table, snapshotId, !cacheEnabled);
}
private Identifier namespaceToIdentifier(String[] namespace) {
nit: I think we can avoid this line change by putting it in the else clause
private Table loadFromPathIdentifier(PathIdentifier ident) {
Preconditions.checkArgument(
tagSnapshot != null, "Cannot find snapshot associated with tag name: %s", tag);
return new SparkTable(table, tagSnapshot.snapshotId(), !cacheEnabled);
+ } else {
+ return new SparkTable(table, snapshotId, !cacheEnabled);
}
}
private Identifier namespaceToIdentifier(String[] namespace) {
|
codereview_new_java_data_4443
|
public void onBindViewHolder(@NonNull Holder holder, int position) {
.fitCenter()
.dontAnimate())
.into(holder.imageView);
-
}
@Nullable
```suggestion
}
```
public void onBindViewHolder(@NonNull Holder holder, int position) {
.fitCenter()
.dontAnimate())
.into(holder.imageView);
}
@Nullable
|
codereview_new_java_data_4444
|
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMen
recyclerViewFeeds.setLayoutManager(layoutManagerFeeds);
adapterFeeds = new HorizontalFeedListAdapter((MainActivity) getActivity()) {
-
@Override
public void onCreateContextMenu(ContextMenu contextMenu, View view,
ContextMenu.ContextMenuInfo contextMenuInfo) {
```suggestion
adapterFeeds = new HorizontalFeedListAdapter((MainActivity) getActivity()) {
@Override
```
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMen
recyclerViewFeeds.setLayoutManager(layoutManagerFeeds);
adapterFeeds = new HorizontalFeedListAdapter((MainActivity) getActivity()) {
@Override
public void onCreateContextMenu(ContextMenu contextMenu, View view,
ContextMenu.ContextMenuInfo contextMenuInfo) {
|
codereview_new_java_data_4445
|
public MainActivityStarter withAddToBackStack() {
public MainActivityStarter withFragmentLoaded(String fragmentName) {
intent.putExtra(EXTRA_FRAGMENT_TAG, fragmentName);
intent.putExtra(EXTRA_OPEN_DRAWER, true);
return this;
}
These should be two different methods. One that loads a fragment and one that opens the drawer.
public MainActivityStarter withAddToBackStack() {
public MainActivityStarter withFragmentLoaded(String fragmentName) {
intent.putExtra(EXTRA_FRAGMENT_TAG, fragmentName);
+ return withDrawerOpen();
+ }
+
+ private MainActivityStarter withDrawerOpen() {
intent.putExtra(EXTRA_OPEN_DRAWER, true);
return this;
}
|
codereview_new_java_data_4446
|
public class DownloadServiceCallbacksImpl implements DownloadServiceCallbacks {
@Override
public PendingIntent getNotificationContentIntent(Context context) {
- Intent intent = new Intent(context, MainActivity.class);
- intent.putExtra(MainActivityStarter.EXTRA_FRAGMENT_TAG, CompletedDownloadsFragment.TAG);
return PendingIntent.getActivity(context,
- R.id.pending_intent_download_service_notification, intent,
PendingIntent.FLAG_UPDATE_CURRENT | (Build.VERSION.SDK_INT >= 23 ? PendingIntent.FLAG_IMMUTABLE : 0));
}
You can now use `MainActivityStarter.withFragmentLoaded()` here to avoid having to handle the specific extras.
public class DownloadServiceCallbacksImpl implements DownloadServiceCallbacks {
@Override
public PendingIntent getNotificationContentIntent(Context context) {
+ MainActivityStarter starter = new MainActivityStarter(context)
+ .withFragmentLoaded(CompletedDownloadsFragment.TAG);
return PendingIntent.getActivity(context,
+ R.id.pending_intent_download_service_notification, starter.getIntent(),
PendingIntent.FLAG_UPDATE_CURRENT | (Build.VERSION.SDK_INT >= 23 ? PendingIntent.FLAG_IMMUTABLE : 0));
}
|
codereview_new_java_data_4447
|
public void onItemClick(int position) {
@Override
public boolean onItemLongClick(int position) {
if (position < navAdapter.getFragmentTags().size()) {
- DrawerPreferencesDialog.show(getContext(), () -> {
- navAdapter.notifyDataSetChanged();
- });
return true;
} else {
contextPressedItem = flatItemList.get(position - navAdapter.getSubscriptionOffset());
Please revert this formatting change. That reduces the diff and makes it easier to use `git blame` later.
public void onItemClick(int position) {
@Override
public boolean onItemLongClick(int position) {
if (position < navAdapter.getFragmentTags().size()) {
+ DrawerPreferencesDialog.show(getContext(), () -> navAdapter.notifyDataSetChanged());
return true;
} else {
contextPressedItem = flatItemList.get(position - navAdapter.getSubscriptionOffset());
|
codereview_new_java_data_4448
|
public static class FeedSettingsPreferenceFragment extends PreferenceFragmentCom
private static final String PREF_FEED_PLAYBACK_SPEED = "feedPlaybackSpeed";
private static final String PREF_AUTO_SKIP = "feedAutoSkip";
private static final String PREF_TAGS = "tags";
- private static final String PREF_EDIT_FEED_URL = "editFeedUrl";
private Feed feed;
private Disposable disposable;
I don't think this is needed anymore
public static class FeedSettingsPreferenceFragment extends PreferenceFragmentCom
private static final String PREF_FEED_PLAYBACK_SPEED = "feedPlaybackSpeed";
private static final String PREF_AUTO_SKIP = "feedAutoSkip";
private static final String PREF_TAGS = "tags";
private Feed feed;
private Disposable disposable;
|
codereview_new_java_data_4449
|
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
emptyView.setIcon(R.drawable.ic_history);
emptyView.setTitle(R.string.no_history_head_label);
emptyView.setMessage(R.string.no_history_label);
- swipeActions = new SwipeActions(this, getFragmentTag()).attachTo(recyclerView);
swipeActions.setFilter(getFilter());
return root;
}
I don't think creating a new SwipeActions object is needed here. You can just remove the `detach` line.
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
emptyView.setIcon(R.drawable.ic_history);
emptyView.setTitle(R.string.no_history_head_label);
emptyView.setMessage(R.string.no_history_label);
swipeActions.setFilter(getFilter());
return root;
}
|
codereview_new_java_data_4450
|
public interface SwipeAction {
String TOGGLE_PLAYED = "MARK_PLAYED";
String REMOVE_FROM_QUEUE = "REMOVE_FROM_QUEUE";
String DELETE = "DELETE";
- String REMOVE_FROM_HISTORY = "DELETE";
String getId();
This should be a different constant from actual episode deletion.
public interface SwipeAction {
String TOGGLE_PLAYED = "MARK_PLAYED";
String REMOVE_FROM_QUEUE = "REMOVE_FROM_QUEUE";
String DELETE = "DELETE";
+ String REMOVE_FROM_HISTORY = "REMOVE_FROM_HISTORY";
String getId();
|
codereview_new_java_data_4451
|
public static Future<?> deleteFromPlaybackHistory(FeedItem feedItem) {
return dbExec.submit(() -> {
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
- adapter.removeFromPlaybackHistory(feedItem);
adapter.close();
EventBus.getDefault().post(PlaybackHistoryEvent.listUpdated());
});
Instead of adding a new method, something like this should also work:
```
media.setPlaybackCompletionDate(new Date(0));
adapter.setFeedMediaPlaybackCompletionDate(media);
```
public static Future<?> deleteFromPlaybackHistory(FeedItem feedItem) {
return dbExec.submit(() -> {
PodDBAdapter adapter = PodDBAdapter.getInstance();
adapter.open();
+
+ feedItem.getMedia().setPlaybackCompletionDate(new Date(0));
+ adapter.setFeedMediaPlaybackCompletionDate(feedItem.getMedia());
+
adapter.close();
EventBus.getDefault().post(PlaybackHistoryEvent.listUpdated());
});
|
codereview_new_java_data_4452
|
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
DBWriter.deleteFromPlaybackHistory(item);
((MainActivity) fragment.requireActivity())
- .showSnackbarAbovePlayer(
- R.string.removed_history_label,
- Snackbar.LENGTH_LONG
- ).setAction(
- fragment.getString(R.string.undo),
- v -> DBWriter.addItemToPlaybackHistory(
- item.getMedia(),
- playbackCompletionDate
- ));
}
@Override
This can be a lot shorter :)
```suggestion
.showSnackbarAbovePlayer(R.string.removed_history_label, Snackbar.LENGTH_LONG)
.setAction(fragment.getString(R.string.undo),
v -> DBWriter.addItemToPlaybackHistory(item.getMedia(), playbackCompletionDate));
```
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
DBWriter.deleteFromPlaybackHistory(item);
((MainActivity) fragment.requireActivity())
+ .showSnackbarAbovePlayer(R.string.removed_history_label, Snackbar.LENGTH_LONG)
+ .setAction(fragment.getString(R.string.undo),
+ v -> DBWriter.addItemToPlaybackHistory(item.getMedia(), playbackCompletionDate));
}
@Override
|
codereview_new_java_data_4453
|
public static Future<?> clearPlaybackHistory() {
}
public static Future<?> deleteFromPlaybackHistory(FeedItem feedItem) {
- return dbExec.submit(() -> {
- PodDBAdapter adapter = PodDBAdapter.getInstance();
- adapter.open();
-
- feedItem.getMedia().setPlaybackCompletionDate(new Date(0));
- adapter.setFeedMediaPlaybackCompletionDate(feedItem.getMedia());
-
- adapter.close();
- EventBus.getDefault().post(PlaybackHistoryEvent.listUpdated());
- });
}
/**
Can also use the generic method:
```suggestion
public static Future<?> deleteFromPlaybackHistory(FeedItem feedItem) {
return addItemToPlaybackHistory(media, new Date(0));
}
```
public static Future<?> clearPlaybackHistory() {
}
public static Future<?> deleteFromPlaybackHistory(FeedItem feedItem) {
+ return addItemToPlaybackHistory(media, new Date(0));
}
/**
|
codereview_new_java_data_4454
|
public synchronized void init() {
}
}
- public boolean isRefresed() {
- return refresed;
- }
-
- public void setRefresed(boolean refresed) {
- this.refresed = refresed;
- }
-
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(PlaybackServiceEvent event) {
if (event.action == PlaybackServiceEvent.Action.SERVICE_STARTED) {
It looks like this is a leftover from an earlier experiment. The method is never used.
public synchronized void init() {
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(PlaybackServiceEvent event) {
if (event.action == PlaybackServiceEvent.Action.SERVICE_STARTED) {
|
codereview_new_java_data_4455
|
public Dialog onCreateDialog(@Nullable Bundle savedInstanceState) {
.setTitle(getString(R.string.chapters_label))
.setView(onCreateView(getLayoutInflater()))
.setPositiveButton(getString(R.string.close_label), null) //dismisses
- .setNeutralButton("Reset", null)
- .show();
dialog.show();
dialog.getButton(DialogInterface.BUTTON_NEUTRAL).setOnClickListener(v -> {
controller = new PlaybackController(getActivity()) {
Hmm, this feels like a hacky workaround. I would prefer to not call `.show()` in `onCreateDialog`. There must be another method that is called when the dialog is actually shown.
public Dialog onCreateDialog(@Nullable Bundle savedInstanceState) {
.setTitle(getString(R.string.chapters_label))
.setView(onCreateView(getLayoutInflater()))
.setPositiveButton(getString(R.string.close_label), null) //dismisses
+ .setNeutralButton(getString(R.string.refresh_label), null)
+ .create();
dialog.show();
dialog.getButton(DialogInterface.BUTTON_NEUTRAL).setOnClickListener(v -> {
controller = new PlaybackController(getActivity()) {
|
codereview_new_java_data_4456
|
public boolean onMenuItemClick(MenuItem item) {
ConfirmationDialog conDialog = new ConfirmationDialog(
getActivity(),
- de.danoeh.antennapod.ui.statistics.R.string.clear_playback_history,
- de.danoeh.antennapod.ui.statistics.R.string.clear_playback_history_msg) {
@Override
public void onConfirmButtonPressed(DialogInterface dialog) {
I think it should be enough to use the already imported R class:
```suggestion
ConfirmationDialog conDialog = new ConfirmationDialog(getActivity(),
R.string.clear_playback_history, R.string.clear_playback_history_msg) {
```
public boolean onMenuItemClick(MenuItem item) {
ConfirmationDialog conDialog = new ConfirmationDialog(
getActivity(),
+ R.string.clear_history_label,
+ R.string.clear_playback_history_msg) {
@Override
public void onConfirmButtonPressed(DialogInterface dialog) {
|
codereview_new_java_data_4457
|
private static void upgrade(int oldVersion, Context context) {
if (feedCounterSetting.equals("0")) {
prefs.edit().putString(UserPreferences.PREF_DRAWER_FEED_COUNTER, "2").apply();
}
- }
- if (oldVersion < 2070095) {
- long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
- TimeUnit unit = SleepTimerPreferences.UNITS[SleepTimerPreferences.lastTimerTimeUnit()];
- SleepTimerPreferences.setLastTimer(
- String.valueOf(unit.toMinutes(value))
- );
}
}
}
Please add this to the `if` statement above - it should be migrated when switching to the next version, not when switching to the current version.
private static void upgrade(int oldVersion, Context context) {
if (feedCounterSetting.equals("0")) {
prefs.edit().putString(UserPreferences.PREF_DRAWER_FEED_COUNTER, "2").apply();
}
+ SharedPreferences sleepTimerPreferences = context.getSharedPreferences(SleepTimerPreferences.PREF_NAME, Context.MODE_PRIVATE);
+ String PREF_TIME_UNIT = "LastTimeUnit";
+ int DEFAULT_TIME_UNIT = 1;
+ TimeUnit[] UNITS = { TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS };
+ long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
+ TimeUnit unit = UNITS[sleepTimerPreferences.getInt(PREF_TIME_UNIT, DEFAULT_TIME_UNIT)];
+ SleepTimerPreferences.setLastTimer(String.valueOf(unit.toMinutes(value)));
}
}
}
|
codereview_new_java_data_4458
|
private static void upgrade(int oldVersion, Context context) {
if (feedCounterSetting.equals("0")) {
prefs.edit().putString(UserPreferences.PREF_DRAWER_FEED_COUNTER, "2").apply();
}
- }
- if (oldVersion < 2070095) {
- long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
- TimeUnit unit = SleepTimerPreferences.UNITS[SleepTimerPreferences.lastTimerTimeUnit()];
- SleepTimerPreferences.setLastTimer(
- String.valueOf(unit.toMinutes(value))
- );
}
}
}
This is now the only usage of `SleepTimerPreferences.lastTimerTimeUnit()`. Please instead move the code here and remove it from the class, so that the "main code" does not have unused legacy code. All legacy code should be bundled here in the upgrader.
private static void upgrade(int oldVersion, Context context) {
if (feedCounterSetting.equals("0")) {
prefs.edit().putString(UserPreferences.PREF_DRAWER_FEED_COUNTER, "2").apply();
}
+ SharedPreferences sleepTimerPreferences = context.getSharedPreferences(SleepTimerPreferences.PREF_NAME, Context.MODE_PRIVATE);
+ String PREF_TIME_UNIT = "LastTimeUnit";
+ int DEFAULT_TIME_UNIT = 1;
+ TimeUnit[] UNITS = { TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS };
+ long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
+ TimeUnit unit = UNITS[sleepTimerPreferences.getInt(PREF_TIME_UNIT, DEFAULT_TIME_UNIT)];
+ SleepTimerPreferences.setLastTimer(String.valueOf(unit.toMinutes(value)));
}
}
}
|
codereview_new_java_data_4459
|
private static void upgrade(int oldVersion, Context context) {
SharedPreferences sleepTimerPreferences =
context.getSharedPreferences(SleepTimerPreferences.PREF_NAME, Context.MODE_PRIVATE);
- String prefTimeUnit = "LastTimeUnit";
- int defaultTimeUnit = 1;
TimeUnit[] timeUnits = { TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS };
-
long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
- TimeUnit unit = timeUnits[sleepTimerPreferences.getInt(prefTimeUnit, defaultTimeUnit)];
-
SleepTimerPreferences.setLastTimer(String.valueOf(unit.toMinutes(value)));
}
}
Can be shortened:
```suggestion
TimeUnit[] timeUnits = { TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS };
long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
TimeUnit unit = timeUnits[sleepTimerPreferences.getInt("LastTimeUnit", 1)];
SleepTimerPreferences.setLastTimer(String.valueOf(unit.toMinutes(value)));
```
private static void upgrade(int oldVersion, Context context) {
SharedPreferences sleepTimerPreferences =
context.getSharedPreferences(SleepTimerPreferences.PREF_NAME, Context.MODE_PRIVATE);
TimeUnit[] timeUnits = { TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS };
long value = Long.parseLong(SleepTimerPreferences.lastTimerValue());
+ TimeUnit unit = timeUnits[sleepTimerPreferences.getInt("LastTimeUnit", 1)];
SleepTimerPreferences.setLastTimer(String.valueOf(unit.toMinutes(value)));
}
}
|
codereview_new_java_data_4460
|
public void testInit() {
private Playable writeTestPlayable(String downloadUrl, String fileUrl) {
Feed f = new Feed(0, null, "f", "l", "d", null, null, null, null, "i", null, null, "l", false);
FeedPreferences prefs = new FeedPreferences(f.getId(), false, FeedPreferences.AutoDeleteAction.NO,
- VolumeAdaptionSetting.OFF, FeedPreferences.SkipInboxSetting.NO, null, null);
f.setPreferences(prefs);
f.setItems(new ArrayList<>());
FeedItem i = new FeedItem(0, "t", "i", "l", new Date(), FeedItem.UNPLAYED, f);
I would rename the setting to something like `NewEpisodesAction.NOTHING` and `NewEpisodesAction.ADD_TO_INBOX`. Then we can also add something like `NewEpisodesAction.ENQUEUE` (see #5246) later.
public void testInit() {
private Playable writeTestPlayable(String downloadUrl, String fileUrl) {
Feed f = new Feed(0, null, "f", "l", "d", null, null, null, null, "i", null, null, "l", false);
FeedPreferences prefs = new FeedPreferences(f.getId(), false, FeedPreferences.AutoDeleteAction.NO,
+ VolumeAdaptionSetting.OFF, FeedPreferences.NewEpisodesAction.NOTHING, null, null);
f.setPreferences(prefs);
f.setItems(new ArrayList<>());
FeedItem i = new FeedItem(0, "t", "i", "l", new Date(), FeedItem.UNPLAYED, f);
|
codereview_new_java_data_4461
|
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
@Override
public boolean willRemove(FeedItemFilter filter, FeedItem item) {
- return filter.showPlayed || filter.showNew;
}
}
I think you need to check based on the item here. Otherwise the items are no longer swiped out completely when filtering by "unplayed" and expecting the toggle action to remove from screen. So, when the item is new, do what you do now. Otherwise do what was done before.
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
@Override
public boolean willRemove(FeedItemFilter filter, FeedItem item) {
+ if (item.getPlayState() == item.NEW) {
+ return filter.showPlayed || filter.showNew;
+ } else {
+ return filter.showUnplayed || filter.showPlayed || filter.showNew;
+ }
}
}
|
codereview_new_java_data_4462
|
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
@Override
public boolean willRemove(FeedItemFilter filter, FeedItem item) {
- if (item.getPlayState() == item.NEW) {
return filter.showPlayed || filter.showNew;
} else {
return filter.showUnplayed || filter.showPlayed || filter.showNew;
```suggestion
if (item.getPlayState() == FeedItem.NEW) {
```
public void performAction(FeedItem item, Fragment fragment, FeedItemFilter filte
@Override
public boolean willRemove(FeedItemFilter filter, FeedItem item) {
+ if (item.getPlayState() == FeedItem.NEW) {
return filter.showPlayed || filter.showNew;
} else {
return filter.showUnplayed || filter.showPlayed || filter.showNew;
|
codereview_new_java_data_4463
|
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
recyclerView.setRecycledViewPool(((MainActivity) getActivity()).getRecycledViewPool());
setupLoadMoreScrollListener();
- swipeActions = new SwipeActions(this, TAG).attachTo(recyclerView);
swipeActions.setFilter(getFilter());
RecyclerView.ItemAnimator animator = recyclerView.getItemAnimator();
The old code was correct here. Child classes of EpisodesListFragment are used in playback history, all episodes list, and inbox. Using the method call ensures that we store the tag of the respective child fragment.
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
recyclerView.setRecycledViewPool(((MainActivity) getActivity()).getRecycledViewPool());
setupLoadMoreScrollListener();
+ swipeActions = new SwipeActions(this, getFragmentTag()).attachTo(recyclerView);
swipeActions.setFilter(getFilter());
RecyclerView.ItemAnimator animator = recyclerView.getItemAnimator();
|
codereview_new_java_data_4464
|
private static void upgrade(int oldVersion, Context context) {
if (oldVersion < 2050000) {
prefs.edit().putBoolean(UserPreferences.PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, true).apply();
}
- if (oldVersion < 2070000) {
// Migrate drawer feed counter setting to reflect removal of
// "unplayed and in inbox" (0), by changing it to "unplayed" (2)
- if (UserPreferences.getFeedCounterSetting().id == 0) {
- UserPreferences.setFeedCounterSetting("2");
}
}
}
2.7 is already in beta, so the version this will land in is 2.8 :)
private static void upgrade(int oldVersion, Context context) {
if (oldVersion < 2050000) {
prefs.edit().putBoolean(UserPreferences.PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, true).apply();
}
+ if (oldVersion < 2080000) {
// Migrate drawer feed counter setting to reflect removal of
// "unplayed and in inbox" (0), by changing it to "unplayed" (2)
+ String feedCounterSetting = prefs.getString("prefDrawerFeedIndicator", "1");
+ if (feedCounterSetting.equals("0")) {
+ prefs.edit().putString("prefDrawerFeedIndicator", "2").apply();
}
}
}
|
codereview_new_java_data_4465
|
public final Map<Long, Integer> getFeedCounters(FeedCounter setting, long... fee
whereRead = KEY_READ + "=" + FeedItem.NEW;
break;
case SHOW_UNPLAYED:
- whereRead = KEY_READ + "=" + FeedItem.UNPLAYED;
break;
case SHOW_DOWNLOADED:
whereRead = KEY_DOWNLOADED + "=1";
After this migration, the "unplayed" counter should take over the behavior of "new and unplayed", because episodes in the inbox are always unplayed. Our data model here is a bit confusing because an episode can be either in the inbox or unplayed, but for a user, episodes in the inbox are definitely unplayed as well.
public final Map<Long, Integer> getFeedCounters(FeedCounter setting, long... fee
whereRead = KEY_READ + "=" + FeedItem.NEW;
break;
case SHOW_UNPLAYED:
+ whereRead = "(" + KEY_READ + "=" + FeedItem.NEW +
+ " OR " + KEY_READ + "=" + FeedItem.UNPLAYED + ")";
break;
case SHOW_DOWNLOADED:
whereRead = KEY_DOWNLOADED + "=1";
|
codereview_new_java_data_4466
|
public void statusChanged(PlaybackServiceMediaPlayer.PSMPInfo newInfo) {
break;
}
- // Trigger an update to the Quick Settings tile (see QuickSettingsTileService.java)
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
- TileService.requestListeningState(
- getApplicationContext(),
- new ComponentName(getApplicationContext(), QuickSettingsTileService.class.getName())
- );
}
IntentUtils.sendLocalBroadcast(getApplicationContext(), ACTION_PLAYER_STATUS_CHANGED);
This can be more compact.
```suggestion
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
TileService.requestListeningState(getApplicationContext(),
new ComponentName(getApplicationContext(), QuickSettingsTileService.class));
}
```
public void statusChanged(PlaybackServiceMediaPlayer.PSMPInfo newInfo) {
break;
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
+ TileService.requestListeningState(getApplicationContext(),
+ new ComponentName(getApplicationContext(), QuickSettingsTileService.class));
}
IntentUtils.sendLocalBroadcast(getApplicationContext(), ACTION_PLAYER_STATUS_CHANGED);
|
codereview_new_java_data_4467
|
public ClassLoader getClassLoader(String context) {
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
- }).collect(Collectors.toList()).toArray(new URL[] {}), ClassLoader.getSystemClassLoader());
});
}
}
```suggestion
}).toArray(URL[]::new), ClassLoader.getSystemClassLoader());
```
Outside of the scope and not changed in this PR but noticed that the collect step can be skipped here.
public ClassLoader getClassLoader(String context) {
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
+ }).toArray(URL[]::new), ClassLoader.getSystemClassLoader());
});
}
}
|
codereview_new_java_data_4468
|
public enum Property {
+ " a comma or other reserved characters in a URI use standard URI hex"
+ " encoding. For example replace commas with %2C.",
"1.6.0"),
- INSTANCE_VOLUMES_CONFIG("instance.volume.config.", null, PropertyType.PREFIX,
"Properties in this category are used to provide volume specific overrides to "
+ "the general filesystem client configuration. Properties using this prefix "
+ "should be in the form "
```suggestion
INSTANCE_VOLUME_CONFIG_PREFIX("instance.volume.config.", null, PropertyType.PREFIX,
```
public enum Property {
+ " a comma or other reserved characters in a URI use standard URI hex"
+ " encoding. For example replace commas with %2C.",
"1.6.0"),
+ INSTANCE_VOLUME_CONFIG_PREFIX("instance.volume.config.", null, PropertyType.PREFIX,
"Properties in this category are used to provide volume specific overrides to "
+ "the general filesystem client configuration. Properties using this prefix "
+ "should be in the form "
|
codereview_new_java_data_4469
|
public void createTableWithTableNameLengthLimit()
public void createTableWithBadProperties()
throws AccumuloException, AccumuloSecurityException, TableExistsException {
TableOperations tableOps = accumuloClient.tableOperations();
- String t0 = StringUtils.repeat('a', MAX_TABLE_NAME_LEN - 1);
tableOps.create(t0);
assertTrue(tableOps.exists(t0));
assertThrows(AccumuloException.class,
We typically create table names with something like `String t0 = getUniqueNames(1)[0];`
public void createTableWithTableNameLengthLimit()
public void createTableWithBadProperties()
throws AccumuloException, AccumuloSecurityException, TableExistsException {
TableOperations tableOps = accumuloClient.tableOperations();
+ String t0 = getUniqueNames(1)[0];
tableOps.create(t0);
assertTrue(tableOps.exists(t0));
assertThrows(AccumuloException.class,
|
codereview_new_java_data_4470
|
public void testCanRun() {
// ensure this fails with older versions; the oldest supported version is hard-coded here
// to ensure we don't unintentionally break upgrade support; changing this should be a conscious
// decision and this check will ensure we don't overlook it
- // as of 3.0 we will only support upgrades from 2.1
final int oldestSupported = AccumuloDataVersion.ROOT_TABLET_META_CHANGES;
final int currentVersion = AccumuloDataVersion.get();
IntConsumer shouldPass = ServerContext::ensureDataVersionCompatible;
I'm not sure this comment expresses anything that the next line doesn't already express more clearly. As we move forward, the version names in this comment will get stale over time and the comment will need to be constantly updated. I'm not sure it's any more helpful than having the explicit assignment line right after.
public void testCanRun() {
// ensure this fails with older versions; the oldest supported version is hard-coded here
// to ensure we don't unintentionally break upgrade support; changing this should be a conscious
// decision and this check will ensure we don't overlook it
final int oldestSupported = AccumuloDataVersion.ROOT_TABLET_META_CHANGES;
final int currentVersion = AccumuloDataVersion.get();
IntConsumer shouldPass = ServerContext::ensureDataVersionCompatible;
|
codereview_new_java_data_4471
|
public class AccumuloDataVersion {
*
* <ul>
* <li>version (9) RFiles and wal crypto serialization changes. RFile summary data in 2.0.0</li>
- * <li>version (8) RFile index (ACCUMULO-1124) and wal tracking in ZK</li>
* <li>version (7) also reflects the addition of a replication table in 1.7.0
* <li>version (6) reflects the addition of a separate root table (ACCUMULO-1481) in 1.6.0 -
* <li>version (5) moves delete file markers for the metadata table into the root tablet
```suggestion
* <li>version (8) RFile index (ACCUMULO-1124) and wal tracking in ZK in 1.8.0</li>
```
public class AccumuloDataVersion {
*
* <ul>
* <li>version (9) RFiles and wal crypto serialization changes. RFile summary data in 2.0.0</li>
+ * <li>version (8) RFile index (ACCUMULO-1124) and wal tracking in ZK in 1.8.0</li>
* <li>version (7) also reflects the addition of a replication table in 1.7.0
* <li>version (6) reflects the addition of a separate root table (ACCUMULO-1481) in 1.6.0 -
* <li>version (5) moves delete file markers for the metadata table into the root tablet
|
codereview_new_java_data_4472
|
public synchronized void upgradeZookeeper(ServerContext context,
int oldestVersion = ROOT_TABLET_META_CHANGES;
if (cv < oldestVersion) {
String oldRelease = dataVersionToReleaseName(oldestVersion);
- throw new UnsupportedOperationException("Upgrading from a version before " + oldRelease
+ " data version (" + oldestVersion + ") is not supported. Upgrade to at least "
+ oldRelease + " before upgrading to " + Constants.VERSION);
}
Instead of having another place where this is hard-coded, can just get the first upgrader mapping key:
```suggestion
int oldestVersion = upgraders.iterator().next().getKey();
```
However, this requires a predictable ordering of the upgraders... which we might already be relying on, and which might be an existing problem. See my other comment on this.
public synchronized void upgradeZookeeper(ServerContext context,
int oldestVersion = ROOT_TABLET_META_CHANGES;
if (cv < oldestVersion) {
String oldRelease = dataVersionToReleaseName(oldestVersion);
+ throw new UnsupportedOperationException("Upgrading from a version less than " + oldRelease
+ " data version (" + oldestVersion + ") is not supported. Upgrade to at least "
+ oldRelease + " before upgrading to " + Constants.VERSION);
}
|
codereview_new_java_data_4473
|
import org.apache.accumulo.core.conf.Property;
/**
- * A {@link VolumeChooser} that selects a volume at random from the list of provided volumes. This
- * class is currently the default volume chooser as set by {@link Property#GENERAL_VOLUME_CHOOSER}.
*
* @since 2.1.0
*/
```suggestion
* A {@link VolumeChooser} that selects a volume at random from the list of provided volumes.
```
If the default ever changes, then we have to remember to update this comment. Users can just check the default value of the property.
import org.apache.accumulo.core.conf.Property;
/**
+ * A {@link VolumeChooser} that selects a volume at random from the list of provided volumes.
*
* @since 2.1.0
*/
|
codereview_new_java_data_4474
|
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
- "Describes how the system will assign tablets initially by defining how the 'last' location is updated."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
```suggestion
"Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
```
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
+ "Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
|
codereview_new_java_data_4475
|
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
- "Describes how the system will assign tablets initially by defining how the 'last' location is updated."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
```suggestion
+ " If 'compact' is the mode, then the system will record the location where the tablet's most recent compaction occurred."
```
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
+ "Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
|
codereview_new_java_data_4476
|
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
- "Describes how the system will assign tablets initially by defining how the 'last' location is updated."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
```suggestion
+ " If 'assign' is the mode, then the most recently assigned location will be recorded."
```
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
+ "Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
|
codereview_new_java_data_4477
|
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
- "Describes how the system will assign tablets initially by defining how the 'last' location is updated."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
```suggestion
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
```
I'm not sure we need this one at all. This would require a clean shutdown in order to freeze the current assignments. And, in the case of that clean shutdown, this would be no different than the `assign` mode. If there is not a clean shutdown, then the field would merely be blank... which isn't helpful.
public enum Property {
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compact", PropertyType.LAST_LOCATION_MODE,
+ "Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compact' is the mode, then the system will assign tablets based on the data locality (e.g. the last compaction location)."
+ " If 'assign' is the mode, then tablets will be initially assigned to the last place they were assigned."
+ " If 'unload' is the mode, then tablets will be initially assigned to the last place they were unloaded from (i.e. requires a clean shutdown)."
|
codereview_new_java_data_4478
|
public enum Property {
"Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compaction' is the mode, then the system will record the location where the tablet's most recent compaction occurred."
+ " If 'assignment' is the mode, then the most recently assigned location will be recorded."
- + " Also note that manger.startup.tserver properties might need to be set as well to ensure"
+ " the tserver is available before tablets are initially assigned if the 'last' location is to be used.",
"3.0.0"),
```suggestion
+ " The manager.startup.tserver properties might also need to be set to ensure"
```
public enum Property {
"Describes how the system will record the 'last' location for tablets, which can be used for assigning them when a cluster restarts."
+ " If 'compaction' is the mode, then the system will record the location where the tablet's most recent compaction occurred."
+ " If 'assignment' is the mode, then the most recently assigned location will be recorded."
+ + " The manager.startup.tserver properties might also need to be set to ensure"
+ " the tserver is available before tablets are initially assigned if the 'last' location is to be used.",
"3.0.0"),
|
codereview_new_java_data_4479
|
public static void replaceDatafiles(ServerContext context, KeyExtent extent,
}
TServerInstance self = getTServerInstance(address, zooLock);
- // if the location mode is 'locality'', then preserve the current compaction location in the
// last location value
if ("compaction".equals(context.getConfiguration().get(Property.TSERV_LAST_LOCATION_MODE))) {
tablet.putLocation(self, LocationType.LAST);
```suggestion
// if the location mode is 'compaction', then preserve the current compaction location in the
```
public static void replaceDatafiles(ServerContext context, KeyExtent extent,
}
TServerInstance self = getTServerInstance(address, zooLock);
+ // if the location mode is 'compaction', then preserve the current compaction location in the
// last location value
if ("compaction".equals(context.getConfiguration().get(Property.TSERV_LAST_LOCATION_MODE))) {
tablet.putLocation(self, LocationType.LAST);
|
codereview_new_java_data_4480
|
public enum PropertyType {
+ "config file using '${env:ACCUMULO_HOME}' or similar."),
ABSOLUTEPATH("absolute path",
- x -> x == null || x.trim().isEmpty() || new Path(x.trim()).isAbsolute()
- || x.equals(Property.CONTEXT_CLASSPATH_PROPERTY.getDefaultValue()),
"An absolute filesystem path. The filesystem depends on the property."
+ " This is the same as path, but enforces that its root is explicitly specified."),
I think you can just remove line 113 with no replacement
public enum PropertyType {
+ "config file using '${env:ACCUMULO_HOME}' or similar."),
ABSOLUTEPATH("absolute path",
+ x -> x == null || x.trim().isEmpty() || new Path(x.trim()).isAbsolute(),
"An absolute filesystem path. The filesystem depends on the property."
+ " This is the same as path, but enforces that its root is explicitly specified."),
|
codereview_new_java_data_4481
|
MiniAccumuloConfigImpl initialize() {
// since there is a small amount of memory, check more frequently for majc... setting may not
// be needed in 1.5
mergeProp(Property.TSERV_MAJC_DELAY.getKey(), "3");
- @SuppressWarnings("deprecation")
- Property generalClasspaths = Property.GENERAL_CLASSPATHS;
- mergeProp(generalClasspaths.getKey(), libDir.getAbsolutePath() + "/[^.].*[.]jar");
mergeProp(Property.GC_CYCLE_DELAY.getKey(), "4s");
mergeProp(Property.GC_CYCLE_START.getKey(), "0s");
mergePropWithRandomPort(Property.MANAGER_CLIENTPORT.getKey());
This property can be removed also
MiniAccumuloConfigImpl initialize() {
// since there is a small amount of memory, check more frequently for majc... setting may not
// be needed in 1.5
mergeProp(Property.TSERV_MAJC_DELAY.getKey(), "3");
mergeProp(Property.GC_CYCLE_DELAY.getKey(), "4s");
mergeProp(Property.GC_CYCLE_START.getKey(), "0s");
mergePropWithRandomPort(Property.MANAGER_CLIENTPORT.getKey());
|
codereview_new_java_data_4482
|
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
- try {
- writer.print("Accumulo Shell Classpath: \n");
- final String javaClassPath = System.getProperty("java.class.path");
- if (javaClassPath == null) {
- throw new IllegalStateException("java.class.path is not set");
- }
- Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(classPathUri -> {
- writer.print(classPathUri + "\n");
- });
-
- writer.print("\n");
- } catch (Exception t) {
- throw new RuntimeException(t);
}
}
}
```suggestion
writer.println("Accumulo Shell Classpath:");
```
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
+ writer.println("Accumulo Shell Classpath:");
+ final String javaClassPath = System.getProperty("java.class.path");
+ if (javaClassPath == null) {
+ throw new IllegalStateException("java.class.path is not set");
}
+ Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(writer::println);
+
+ writer.println();
}
}
|
codereview_new_java_data_4483
|
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
- try {
- writer.print("Accumulo Shell Classpath: \n");
- final String javaClassPath = System.getProperty("java.class.path");
- if (javaClassPath == null) {
- throw new IllegalStateException("java.class.path is not set");
- }
- Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(classPathUri -> {
- writer.print(classPathUri + "\n");
- });
-
- writer.print("\n");
- } catch (Exception t) {
- throw new RuntimeException(t);
}
}
}
```suggestion
Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(classPathUri -> writer.println(classPathUri));
```
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
+ writer.println("Accumulo Shell Classpath:");
+ final String javaClassPath = System.getProperty("java.class.path");
+ if (javaClassPath == null) {
+ throw new IllegalStateException("java.class.path is not set");
}
+ Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(writer::println);
+
+ writer.println();
}
}
|
codereview_new_java_data_4484
|
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
- try {
- writer.print("Accumulo Shell Classpath: \n");
- final String javaClassPath = System.getProperty("java.class.path");
- if (javaClassPath == null) {
- throw new IllegalStateException("java.class.path is not set");
- }
- Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(classPathUri -> {
- writer.print(classPathUri + "\n");
- });
-
- writer.print("\n");
- } catch (Exception t) {
- throw new RuntimeException(t);
}
}
}
I would just catch the checked exceptions here using multi-catch syntax, so you don't rewrap RTEs.
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
+ writer.println("Accumulo Shell Classpath:");
+ final String javaClassPath = System.getProperty("java.class.path");
+ if (javaClassPath == null) {
+ throw new IllegalStateException("java.class.path is not set");
}
+ Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(writer::println);
+
+ writer.println();
}
}
|
codereview_new_java_data_4485
|
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
- try {
- writer.print("Accumulo Shell Classpath: \n");
- final String javaClassPath = System.getProperty("java.class.path");
- if (javaClassPath == null) {
- throw new IllegalStateException("java.class.path is not set");
- }
- Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(classPathUri -> {
- writer.print(classPathUri + "\n");
- });
-
- writer.print("\n");
- } catch (Exception t) {
- throw new RuntimeException(t);
}
}
}
Is there a more specific RTE type that we can use instead of this?
public int numArgs() {
}
public static void printClassPath(PrintWriter writer) {
+ writer.println("Accumulo Shell Classpath:");
+ final String javaClassPath = System.getProperty("java.class.path");
+ if (javaClassPath == null) {
+ throw new IllegalStateException("java.class.path is not set");
}
+ Arrays.stream(javaClassPath.split(File.pathSeparator)).forEach(writer::println);
+
+ writer.println();
}
}
|
codereview_new_java_data_4486
|
public class BlockCacheManagerFactory {
/**
* Get the BlockCacheFactory specified by the property 'tserver.cache.factory.class' using the
- * configured ContextClassLoaderFactory
*
* @param conf accumulo configuration
* @return block cache manager instance
I think this will now use the system class loader. I don't think this is done inside a context, but I am not sure.
public class BlockCacheManagerFactory {
/**
* Get the BlockCacheFactory specified by the property 'tserver.cache.factory.class' using the
+ * System class loader
*
* @param conf accumulo configuration
* @return block cache manager instance
|
codereview_new_java_data_4487
|
public void compactionSelectorTest() throws Exception {
// data to know if there are too many foos.
PluginConfig csc = new PluginConfig(FooSelector.class.getName());
CompactionConfig compactConfig = new CompactionConfig().setSelector(csc);
- compactionTest(compactConfig);
- }
-
- private void compactionTest(CompactionConfig compactConfig) throws Exception {
final String table = getUniqueNames(1)[0];
try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
NewTableConfiguration ntc = new NewTableConfiguration();
SummarizerConfiguration sc1 =
Could inline this method now that it's only used once.
public void compactionSelectorTest() throws Exception {
// data to know if there are too many foos.
PluginConfig csc = new PluginConfig(FooSelector.class.getName());
CompactionConfig compactConfig = new CompactionConfig().setSelector(csc);
final String table = getUniqueNames(1)[0];
+
try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
NewTableConfiguration ntc = new NewTableConfiguration();
SummarizerConfiguration sc1 =
|
codereview_new_java_data_4488
|
private static String extractAuthName(ACL acl) {
}
private static boolean canWrite(final Set<String> users, final List<ACL> acls) {
- return ZooDefs.Ids.OPEN_ACL_UNSAFE.equals(acls) || acls.stream()
- .anyMatch(a -> users.contains(extractAuthName(a)) && a.getPerms() >= ZooDefs.Perms.WRITE);
}
private void validateACLs(ServerContext context) {
The perms are bits - so `>= ZooDefs.Perms.WRITE` will return true if any combination of write, create, delete, admin is set - so if the perm was delete only, this would still return "true" for can write, which seems misleading at best.
I think the test might be better named hasAll and the condition could be:
.anyMatch(a -> users.contains(extractAuthName(a)) && a.getPerms() == ZooDefs.Perms.ALL;
```
private static String extractAuthName(ACL acl) {
}
private static boolean canWrite(final Set<String> users, final List<ACL> acls) {
+ return acls.stream()
+ .anyMatch(a -> users.contains(extractAuthName(a)) && a.getPerms() == ZooDefs.Perms.ALL);
}
private void validateACLs(ServerContext context) {
|
codereview_new_java_data_4489
|
public static <U> Class<? extends U> loadClass(String className, Class<U> extens
* Retrieve the classloader context from a table's configuration.
*/
public static String tableContext(AccumuloConfiguration conf) {
- return conf.get(conf.resolve(Property.TABLE_CLASSLOADER_CONTEXT));
}
}
You should simplify these calls to `resolve`, if there's only one property. The `resolve` method tries to select the property to use based on what the user has set in the config. However, if there's only one, then there's nothing to `resolve` and you can just call `get` on the property directly.
```suggestion
return conf.get(Property.TABLE_CLASSLOADER_CONTEXT);
```
public static <U> Class<? extends U> loadClass(String className, Class<U> extens
* Retrieve the classloader context from a table's configuration.
*/
public static String tableContext(AccumuloConfiguration conf) {
+ return conf.get(Property.TABLE_CLASSLOADER_CONTEXT);
}
}
|
codereview_new_java_data_4490
|
public Future<?> submit(Runnable task) {
*/
public ScheduledThreadPoolExecutor
createGeneralScheduledExecutorService(AccumuloConfiguration conf) {
- Property prop = conf.resolve(Property.GENERAL_THREADPOOL_SIZE);
- return (ScheduledThreadPoolExecutor) createExecutorService(conf, prop, true);
}
/**
```suggestion
Property prop = Property.GENERAL_THREADPOOL_SIZE;
```
public Future<?> submit(Runnable task) {
*/
public ScheduledThreadPoolExecutor
createGeneralScheduledExecutorService(AccumuloConfiguration conf) {
+ return (ScheduledThreadPoolExecutor) createExecutorService(conf,
+ Property.GENERAL_THREADPOOL_SIZE, true);
}
/**
|
codereview_new_java_data_4491
|
public void test() {
TableConfiguration conf = createMock(TableConfiguration.class);
// Eclipse might show @SuppressWarnings("removal") as unnecessary.
// Eclipse is wrong. See https://bugs.eclipse.org/bugs/show_bug.cgi?id=565271
- expect(conf.resolve(Property.TABLE_CLASSLOADER_CONTEXT))
- .andReturn(Property.TABLE_CLASSLOADER_CONTEXT).anyTimes();
expect(conf.get(Property.TABLE_CLASSLOADER_CONTEXT)).andReturn("").anyTimes();
expect(context.getTableConfiguration(EasyMock.anyObject())).andReturn(conf).anyTimes();
replay(context, conf);
This would be a weird thing to expect. Probably can be removed when the code that we're expecting is updated to avoid the resolution of a single non-deprecated property, and just uses that property directly.
public void test() {
TableConfiguration conf = createMock(TableConfiguration.class);
// Eclipse might show @SuppressWarnings("removal") as unnecessary.
// Eclipse is wrong. See https://bugs.eclipse.org/bugs/show_bug.cgi?id=565271
expect(conf.get(Property.TABLE_CLASSLOADER_CONTEXT)).andReturn("").anyTimes();
expect(context.getTableConfiguration(EasyMock.anyObject())).andReturn(conf).anyTimes();
replay(context, conf);
|
codereview_new_java_data_4492
|
public static String prepareBulkImport(ServerContext manager, final VolumeManage
AccumuloConfiguration serverConfig = manager.getConfiguration();
ExecutorService workers = ThreadPools.getServerThreadPools().createExecutorService(serverConfig,
- serverConfig.resolve(Property.MANAGER_RENAME_THREADS), false);
List<Future<Exception>> results = new ArrayList<>();
for (FileStatus file : mapFiles) {
```suggestion
Property.MANAGER_RENAME_THREADS, false);
```
public static String prepareBulkImport(ServerContext manager, final VolumeManage
AccumuloConfiguration serverConfig = manager.getConfiguration();
ExecutorService workers = ThreadPools.getServerThreadPools().createExecutorService(serverConfig,
+ Property.MANAGER_RENAME_THREADS, false);
List<Future<Exception>> results = new ArrayList<>();
for (FileStatus file : mapFiles) {
|
codereview_new_java_data_4493
|
public void removeInUseLogs(Set<DfsLogger> candidates) {
public void checkIfMinorCompactionNeededForLogs(List<DfsLogger> closedLogs) {
// grab this outside of tablet lock.
- Property prop = tableConfiguration.resolve(Property.TSERV_WAL_MAX_REFERENCED);
- int maxLogs = tableConfiguration.getCount(prop);
String reason = null;
synchronized (this) {
This is another case where there's only one property to resolve now. You can just inline the prop variable and get rid of the resolve call.
public void removeInUseLogs(Set<DfsLogger> candidates) {
public void checkIfMinorCompactionNeededForLogs(List<DfsLogger> closedLogs) {
// grab this outside of tablet lock.
+ int maxLogs = tableConfiguration.getCount(Property.TSERV_WAL_MAX_REFERENCED);
String reason = null;
synchronized (this) {
|
codereview_new_java_data_4494
|
public void testVerifyPassword() throws Exception {
assertArrayEquals(password, secretManager.retrievePassword(id));
// Make a second token for the same user
- // Sleep for 1 millisecond to guarantee token is unique
Thread.sleep(100);
Entry<Token<AuthenticationTokenIdentifier>,AuthenticationTokenIdentifier> pair2 =
secretManager.generateToken(principal, cfg);
The comment doesn't match. This is sleeping for 100 milliseconds, right? Other than that mismatch, this seems fine.
```suggestion
// Briefly sleep to guarantee token is unique, since the token is based on the time
Thread.sleep(100);
```
public void testVerifyPassword() throws Exception {
assertArrayEquals(password, secretManager.retrievePassword(id));
// Make a second token for the same user
+ // Briefly sleep to guarantee token is unique, since the token is based on the time
Thread.sleep(100);
Entry<Token<AuthenticationTokenIdentifier>,AuthenticationTokenIdentifier> pair2 =
secretManager.generateToken(principal, cfg);
|
codereview_new_java_data_4495
|
public interface InputParameters {
/**
* @return the id of the tablet being compacted
- * @since 2.1.1
*/
TabletId getTabletId();
This is an API addition. It shouldn't be added in a patch release, according to our semver rules.
public interface InputParameters {
/**
* @return the id of the tablet being compacted
+ * @since 3.0.0
*/
TabletId getTabletId();
|
codereview_new_java_data_4496
|
Collection<Summary> getSummaries(Collection<CompactableFile> files,
/**
* @return the tablet id of the tablet being compacted
- * @since 2.1.1
*/
TabletId getTabletId();
Also public API. So, same comment here.
Collection<Summary> getSummaries(Collection<CompactableFile> files,
/**
* @return the tablet id of the tablet being compacted
+ * @since 3.0.0
*/
TabletId getTabletId();
|
codereview_new_java_data_4497
|
protected FileSKVWriter openWriter(FileOptions options) throws IOException {
}
if (options.dropCacheBehind) {
- // Tell the DataNode that the write ahead log does not need to be cached in the OS page
- // cache
try {
outputStream.setDropBehind(Boolean.TRUE);
LOG.trace("Called setDropBehind(TRUE) for stream writing file {}", options.filename);
copy / paste error? This isn't a write-ahead log.
protected FileSKVWriter openWriter(FileOptions options) throws IOException {
}
if (options.dropCacheBehind) {
+ // Tell the DataNode that the file does not need to be cached in the OS page cache
try {
outputStream.setDropBehind(Boolean.TRUE);
LOG.trace("Called setDropBehind(TRUE) for stream writing file {}", options.filename);
|
codereview_new_java_data_4498
|
public enum Property {
"1.3.5"),
TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
"Prefix to be used for user defined arbitrary properties.", "1.7.0"),
- TABLE_MAJC_OUTPUT_DROP_CACHE("table.compaction.major.output.drop.cache", "false",
- PropertyType.BOOLEAN,
- "Setting this property to true will call"
- + "FSDataOutputStream.setDropBehind(true) on the major compaction output stream.",
- "2.1.1"),
TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
"Minimum ratio of total input size to maximum input RFile size for"
+ " running a major compaction. ",
Do we really need this? Why not just always set it, and let it be cached when read?
Also, this is a user-facing configuration addition (not strictly public API, but analogous in terms of forwards/backwards-compatibility issues and user expectations). We try to avoid those kinds of additions in patch releases. Can we justify adding it in 2.1.1?
public enum Property {
"1.3.5"),
TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
"Prefix to be used for user defined arbitrary properties.", "1.7.0"),
TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
"Minimum ratio of total input size to maximum input RFile size for"
+ " running a major compaction. ",
|
codereview_new_java_data_4499
|
public CachableBuilder fsPath(FileSystem fs, Path dataFile, boolean dropCacheBeh
// cache
try {
is.setDropBehind(Boolean.TRUE);
- } catch (IOException | UnsupportedOperationException e) {
log.debug("setDropBehind not enabled for wal file: {}", dataFile);
}
}
return is;
I'm wondering if it's useful to show the message from the exception in this debug message, at least in the case of IOException.
public CachableBuilder fsPath(FileSystem fs, Path dataFile, boolean dropCacheBeh
// cache
try {
is.setDropBehind(Boolean.TRUE);
+ } catch (UnsupportedOperationException e) {
log.debug("setDropBehind not enabled for wal file: {}", dataFile);
+ } catch (IOException e) {
+ log.debug("IOException setting drop behind for file: {}, msg: {}", dataFile,
+ e.getMessage());
}
}
return is;
|
codereview_new_java_data_4500
|
* </pre>
*
* <p>
- * If migrating code from Connector to AccumuloClient an important difference to consider is that
- * AccumuloClient is closable and Connector was not. Connector uses static resources and therefore
- * creating them is cheap. AccumuloClient attempts to clean up resources on close, so constantly
- * creating them could perform worse than Connector. Therefore, it would be better to create an
- * AccumuloClient and pass it around.
*
* <p>
* AccumuloClient objects are intended to be thread-safe, and can be used by multiple threads.
The Connector used static resources and was cheaper to create that the AccumuloClient. However, ...
* </pre>
*
* <p>
+ * An important difference with the legacy Connector to consider is that Connector reused global
+ * static resources. AccumuloClient, however, attempts to clean up its resources on close. So,
+ * creating many AccumuloClient objects will perform worse than creating many Connectors did.
+ * Therefore, it is suggested to reuse AccumuloClient instances where possible, rather than create
+ * many of them.
*
* <p>
* AccumuloClient objects are intended to be thread-safe, and can be used by multiple threads.
|
codereview_new_java_data_4501
|
* </pre>
*
* <p>
- * If migrating code from Connector to AccumuloClient an important difference to consider is that
- * AccumuloClient is closable and Connector was not. Connector uses static resources and therefore
- * creating them is cheap. AccumuloClient attempts to clean up resources on close, so constantly
- * creating them could perform worse than Connector. Therefore, it would be better to create an
- * AccumuloClient and pass it around.
*
* <p>
* AccumuloClient objects are intended to be thread-safe, and can be used by multiple threads.
Therefore, it is better to ...
* </pre>
*
* <p>
+ * An important difference with the legacy Connector to consider is that Connector reused global
+ * static resources. AccumuloClient, however, attempts to clean up its resources on close. So,
+ * creating many AccumuloClient objects will perform worse than creating many Connectors did.
+ * Therefore, it is suggested to reuse AccumuloClient instances where possible, rather than create
+ * many of them.
*
* <p>
* AccumuloClient objects are intended to be thread-safe, and can be used by multiple threads.
|
codereview_new_java_data_4502
|
public void testEndOfFirstTablet() throws Exception {
verifyData(c, tableName, 333, 333, false);
Map<String,Set<String>> hashes = new HashMap<>();
- hashes.put("0333", new HashSet<>());
- hashes.get("0333").add(h1);
hashes.put("null", new HashSet<>());
verifyMetadata(c, tableName, hashes);
}
Could this be condensed?
```suggestion
hashes.put("0333", Set.of(h1));
```
public void testEndOfFirstTablet() throws Exception {
verifyData(c, tableName, 333, 333, false);
Map<String,Set<String>> hashes = new HashMap<>();
+ hashes.put("0333", Set.of(h1));
hashes.put("null", new HashSet<>());
verifyMetadata(c, tableName, hashes);
}
|
codereview_new_java_data_4503
|
public void testEndOfFirstTablet() throws Exception {
Map<String,Set<String>> hashes = new HashMap<>();
hashes.put("0333", Set.of(h1));
- hashes.put("null", new HashSet<>());
verifyMetadata(c, tableName, hashes);
}
}
```suggestion
hashes.put("null", Set.of());
```
public void testEndOfFirstTablet() throws Exception {
Map<String,Set<String>> hashes = new HashMap<>();
hashes.put("0333", Set.of(h1));
+ hashes.put("null", Set.of());
verifyMetadata(c, tableName, hashes);
}
}
|
codereview_new_java_data_4504
|
public int execute(final String fullCommand, final CommandLine cl, final Shell s
}
}
if (configuration != null) {
- var propsToAdd = configuration.entrySet().stream()
- .filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
- .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
shellState.getAccumuloClient().namespaceOperations().modifyProperties(namespace,
- properties -> properties.putAll(propsToAdd));
}
return 0;
```suggestion
final Map<String,String> config = configuration;
shellState.getAccumuloClient().namespaceOperations().modifyProperties(namespace,
properties -> config.entrySet().stream()
.filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
.forEach(entry -> properties.put(entry.getKey(), entry.getValue())));
```
Basically the same suggestion as in #2965
public int execute(final String fullCommand, final CommandLine cl, final Shell s
}
}
if (configuration != null) {
+ final Map<String,String> config = configuration;
shellState.getAccumuloClient().namespaceOperations().modifyProperties(namespace,
+ properties -> config.entrySet().stream()
+ .filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
+ .forEach(entry -> properties.put(entry.getKey(), entry.getValue())));
}
return 0;
|
codereview_new_java_data_4505
|
public ZooPropLoader(final ZooReaderWriter zrw, final VersionedPropCodec propCod
Stat stat = new Stat();
byte[] bytes = zrw.getData(propStoreKey.getPath(), propStoreWatcher, stat);
- if (bytes.length == 0) {
return new VersionedProperties();
}
VersionedProperties vProps = propCodec.fromBytes(stat.getVersion(), bytes);
Can bytes be null here? I'm assuming not, as I think you would just get a NoNodeException (which is handled), but I thought I'd bring it up.
public ZooPropLoader(final ZooReaderWriter zrw, final VersionedPropCodec propCod
Stat stat = new Stat();
byte[] bytes = zrw.getData(propStoreKey.getPath(), propStoreWatcher, stat);
+ if (stat.getDataLength() == 0) {
return new VersionedProperties();
}
VersionedProperties vProps = propCodec.fromBytes(stat.getVersion(), bytes);
|
codereview_new_java_data_4506
|
VersionedProperties transform(final PropStoreKey<?> propStoreKey, final Transfor
}
upgradeNodes = convertDeprecatedProps(propStoreKey, upgradeNodes);
- // todo - here
results = writeConverted(propStoreKey, upgradeNodes);
if (results == null) {
Did you mean to leave this here?
VersionedProperties transform(final PropStoreKey<?> propStoreKey, final Transfor
}
upgradeNodes = convertDeprecatedProps(propStoreKey, upgradeNodes);
+
results = writeConverted(propStoreKey, upgradeNodes);
if (results == null) {
|
codereview_new_java_data_4507
|
public void initializeSecurity(TCredentials itw, String rootuser)
Collections.singleton(NamespacePermission.ALTER_NAMESPACE));
namespacePerms.put(Namespace.ACCUMULO.id(),
Collections.singleton(NamespacePermission.ALTER_TABLE));
- namespacePerms.put(Namespace.DEFAULT.id(), Collections.singleton(NamespacePermission.READ));
try {
// prep parent node of users with root username
The root user shouldn't have this permission by default. This namespace is for user tables, and there's no reason the root user should have read permission on the user tables by default.
public void initializeSecurity(TCredentials itw, String rootuser)
Collections.singleton(NamespacePermission.ALTER_NAMESPACE));
namespacePerms.put(Namespace.ACCUMULO.id(),
Collections.singleton(NamespacePermission.ALTER_TABLE));
try {
// prep parent node of users with root username
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 1